Columns:
  query            stringlengths   9 to 3.4k
  document         stringlengths   9 to 87.4k
  metadata         dict
  negatives        listlengths     4 to 101
  negative_scores  listlengths     4 to 101
  document_score   stringlengths   3 to 10
  document_rank    stringclasses   102 values
Function to place a small marker of color 'col' at the coordinate (xa, ya)
def create_mark(xa,ya,col):
    disque = canvas.create_oval(xa-2, ya-2, xa+2, ya+2, fill=col, outline=col)
    return disque
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def em_coord_turtle(lin, col, dim, tam_celula):\n meio = dim // 2\n x = (col - meio) * tam_celula\n y = (meio - lin) * tam_celula\n return x, y", "def get_xy_position(row, col):\n spacing_x = 86 + 11\n spacing_y = 98 + 8\n top_y = 50\n left_x = 50\n return left_x + col * spacing_x, top_y + row * spacing_y", "def colWithTile(self, pos):\n\n\n return self.colWithBox(pos, [2.0,2.0,2.0])", "def _get_x_y_from_pos(self, col,row): \r\n return (self.margin_left+(self.text_width*col),\r\n self.margin_top+(self.text_height*row))", "def _position_x_to_column(self, x, y):\n col = -1\n if y>self.padding_top and y<self.padding_top+self.len_y_cercles:\n for i in range(self.n_columns):\n if x>self.padding_left+i*63 and x<self.padding_left+i*63+self.diam_cercles:\n col = i+1\n break\n return col", "def _coord(self, x, y):\n gridEdge = 7 # originally 5\n y = gridEdge - y\n cx = 100 * (x - 1) + 50\n cy = 100 * (y - 1) + 50\n r = 20\n return (cx - r, cy - r, cx + r, cy + r)", "def get_cell_coords(self, pt):\n\n\t return int(pt[0] // self.a), int(pt[1] // self.a)", "def rowcol2XY(row,col,CCD):\n pixscale = 0.015 #mm/pix\n X = CCD[1]+1024*pixscale-(col*pixscale+pixscale/2.)\n Y = CCD[2]+2048*pixscale-(row*pixscale+pixscale/2.)\n return X,Y", "def col(self, col: tuple) -> list:\n return self.grid[col::9]", "def coord (i, j):\r\n return j, i", "def collocation_points(self, **kw):\n pass", "def coords_to_node(self,row,col):\n return row*self.cols + col + 1", "def coord_char(coord, matrix):\n row_index, column_index = coord\n\n return matrix[row_index][column_index]", "def set_col( self, col ):\n self.ix_col = col", "def location_to_pos(self,row, col):\r\n\r\n pos_row = str(row + 1)\r\n pos_col = chr(col + 97)\r\n return pos_col + pos_row", "def positionColour(row, col):\n if (row + col) % 2 == 0:\n return BLACK\n else:\n return WHITE", "def col_data_mover_at(row, col):\n if col == 0:\n return NAME_SCHEME[\"memory move\"].format(prefix=f\"l{row}\")\n else:\n return NAME_SCHEME[\"register move right\"].format(pe=f\"pe_{row}_{col - 1}\")", "def spot_coords(self,spot):\n if spot == '1':\n return (330 - 60 ,335 - 15)\n if spot == '2':\n return (419 - 60, 335 - 15)\n if spot == '3':\n return (591 - 60, 159 - 15)\n if spot == '4':\n return (588 - 60, 248 - 15)", "def drawcolumn(self, colnum: int, epc: str, dst: float) -> None:\n svg = self.svg\n wscr, hscr = svg.get_WH()\n w_rect = 50\n colsep = 20\n w_column = w_rect + colsep\n\n xleft = colnum * w_column\n TOP_MARGIN = 20\n MAXH = hscr - TOP_MARGIN\n MAXDIST = 5.0\n h_rect = (dst*MAXH)/MAXDIST\n print(\"CALC dist {}, maxh {}, maxdist {} h_rect {} \".format(dst, MAXH, MAXDIST, h_rect))\n if h_rect > MAXH:\n h_rect = MAXH\n h_rect = int(h_rect)\n ytop = TOP_MARGIN + (MAXH - h_rect)\n print(\"DRAW {}: {} {} {} {}\".format(colnum, xleft, ytop, w_rect, h_rect))\n red_colorstr = '#ff0066'\n blu_colorstr = '#6600ff'\n svg.rect(xleft, ytop, w_rect, h_rect, red_colorstr)\n svg.text(xleft, ytop, blu_colorstr, epc)", "def get_coords_for_col(self, i):\n X = N.zeros((self.rows,2),dtype=config.floatX)\n X[:,0] = self.xmin + float(i) * self.delta_x\n X[:,1] = self.ymin + N.cast[config.floatX](N.asarray(range(self.rows-1,-1,-1))) * self.delta_y\n\n\n return X", "def Pos(row, col):\n return ESC + str(row) + ';' + str(col) + 'H'", "def draw_pixel(x, y, col):\n unicornhathd.set_pixel(x, 12 - y, col[0], col[1], col[2])", "def col_for_cell(self, coords, include_self=False):\n row, col = coords\n return ((r, col) for r in self.rows if include_self or r != row)", "def 
_modify_columns(self, cols, X, y=None):", "def get_origin(col, row, pattern_size, margin):\n\tw,h = pattern_size\n\torigin = col*(w+margin), row*(h+margin)\n\treturn origin", "def mark(board, player, row, col):\r\n pass", "def coord(self):\n fmt = \"{min_col}{min_row}:{max_col}{max_row}\"\n if (self.min_col == self.max_col\n and self.min_row == self.max_row):\n fmt = \"{min_col}{min_row}\"\n\n return fmt.format(\n min_col=get_column_letter(self.min_col),\n min_row=self.min_row,\n max_col=get_column_letter(self.max_col),\n max_row=self.max_row\n )", "def map_loc_to_pixel((x, y), xc = 17.25, yc = 630, run = 17.25):\n xp, yp = xc + x*run, yc - y*run\n return (xp, yp)", "def coord(self, x, y):\n origin_x = self._raster_meta['transform'][3]\n origin_y = self._raster_meta['transform'][0]\n pixel_x = self._raster_meta['transform'][5]\n pixel_y = self._raster_meta['transform'][1]\n\n x = int((x - origin_x) / pixel_x)\n y = int((y - origin_y) / pixel_y)\n return self[x, y]", "def set_collocation_points(self, X_f):\n self.t = self.tensor(X_f[:,0:1])\n self.x = self.tensor(X_f[:,1:2])\n self.y = self.tensor(X_f[:,2:3])", "def mark(board, player, row, col):\n pass", "def map_grid_loc_to_pixel((grid, x, y), panel_dimensions = bm_panel_dimensions, xc = 17.25, yc = 630, run = 17.25):\n x_offset = 0\n for panel_index, panel_dim in panel_dimensions.iteritems():\n if panel_index < grid:\n width, height = panel_dim\n x_offset += width*xc\n xp, yp = xc + x*run + x_offset, yc - y*run\n return (xp, yp)", "def _havannah_coord_to_canvas_coord(self, coord):\n col, slant = cubic_to_axial(*coord)\n canvas_x, canvas_y = self.CANVAS_CENTER\n\n canvas_x += col * self.HEX_WIDTH // 4 * 3\n canvas_y += (col * self.HEX_WIDTH // 2) + (slant * self.HEX_WIDTH)\n\n return (canvas_x, canvas_y)", "def _add_coordinate_data(self, df, geom_col):\n x = df.apply(self._get_coords,\n geom_col=geom_col,\n coord_type='x',\n axis=1)\n\n y = df.apply(self._get_coords,\n geom_col=geom_col,\n coord_type='y',\n axis=1)\n return x, y", "def __convert_position(self, row_position: int = None, col_position: int = None) -> int:\n if row_position is None or col_position is None:\n return self.__row_position * len(self.__labyrinth[0]) + self.__col_position\n\n return row_position * len(self.__labyrinth[0]) + col_position", "def _to_maze_coord(self, x, y):\n maze = self._get_maze()\n x = int(x / _MAZE_CELL_SIZE)\n y = int(y / _MAZE_CELL_SIZE)\n y = maze.shape[1] - y - 1\n return x, y", "def test_board_coordinates_toXY():\r\n m = Move()\r\n for col_num, col_name in enumerate(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']):\r\n for row in range(1, 9):\r\n assert m.translate_to_xy(col_name + str(row)) == (Board.SIZE - row, col_num)", "def col_to_lon(self, col):\n return -180 + col * self.lon_step", "def create_point(xa,ya,col):\n disque = canvas.create_oval(xa-(rayon),ya-(rayon),xa+(rayon),ya+(rayon),fill=\"white\",outline=col)\n return disque", "def genRowColCell(self, args):\n if len(args) == 2:\n x = int(args[0])\n y = int(args[1])\n if x == 0 and y == 0:\n return self.genMatrix()\n elif x == 0:\n return self.genRow(y - 1)\n elif y == 0:\n return self.genCol(x - 1)\n else:\n return self.genCell(x - 1, y - 1)\n else:\n return self.genMatrix()", "def coordinates(self):", "def xy_to_rowcol(self, x, y):\n col = int((x - self.board_lft_x) / self.next_square)\n row = int((self.board_top_y - y) / self.next_square)\n return [row, col]", "def set_tile(self, row, col, value):\n # replace with your code\n self.grid[row][col] = value", "def seg_row_col(sp) : \n return 
src_from_rc8x8(sp.peak_y_raw, sp.peak_x_raw)", "def getcellcenter(self,cellx,celly):\n xpos = self.xmargin + cellx*CELLSIZE + CELLSIZE/2\n ypos = self.ymargin + celly*CELLSIZE + CELLSIZE/2\n return (xpos,ypos)", "def compute_row_col(self, x, y):\n if self.is_in_small_cell_area(x, y):\n x = int(math.fabs(x - self.xoffset))\n col = x / self.small.width\n row = (self.small_yoffset - y) / self.small.height\n else:\n col = (x - self.xoffset) / self.large.width\n row = (self.large_yoffset - y) / self.large.height\n return row, col", "def test_if_row_col_well_retrieved_from_mouse_pos(self):\n ui = UIRender(TestUI.image_path)\n row, col = ui.get_row_col_from_mouse((10,25))\n self.assertEqual(row, 0)\n self.assertEqual(col, 0)", "def build_collocation(nr, nz):\n\n rootsr, _, Br, Wr = recur_colloc_symm(nr, 3)\n rootsz, Az, _ = recur_colloc(nz)\n \n return rootsz, Az, rootsr, Br, Wr", "def set(self, row: int, col: int, color: Color) -> None:\n super(ColorGrid, self).set(row, col, color)", "def getCol(self, n, offset=0):\n return self._c[(n*self.__height + offset):((n+1) * self.__height)]", "def _to_world_coord(self, x, y):\n maze = self._get_maze()\n y = maze.shape[1] - y - 1\n return (float(x) + .5) * _MAZE_CELL_SIZE, (float(y) + .5) * _MAZE_CELL_SIZE", "def get_cell_coords(pt):\n\n return int(pt[0] // a), int(pt[1] // a)", "def get_xy(self, x, y):\r\n\t\treturn self.grid[y, x]", "def locate(x, y):\n position(x * 6, y)", "def redraw_col(cls, col):\n # TODO: It's wasteful to make draw_block get block all over again here!\n for _, coord in MapModel.get_column(col, cls.depth):\n cls.draw_block(coord)", "def __init__(self, row = 0, col = 0):\n self.row = row\n self.col = col", "def cell_value(self, x, y):\n if x == 8 and y == 0:\n return \"--\"\n (r, g) = self[(x, y)]\n return \"%s%s\" % (r, g)", "def drawColorColumn(x, yseq, zseq):\n dislin.curvy3(x, yseq, zseq, len(yseq))", "def cols(self, col):\n self.col += col", "def _get_grid_coord(wl, bl):\n row = None\n col = None\n\n for i, (l, h) in QLDbEntry.wl_map.items():\n if wl >= l and wl <= h:\n row = i\n break\n\n for i, (l, h) in QLDbEntry.bl_map.items():\n if bl >= l and bl <= h:\n col = i\n break\n\n return col, row", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value", "def _get_grid_coord(self, point):\n return tuple([int(point[i] / self._cell_length) for i in range(self._dim)])", "def putpixel(self, col, row, color=GREEN):\n if col < 0 or row < 0:\n return\n try:\n self.vram[row][col] = color\n except IndexError:\n pass", "def get_id_from_coor(self, x, y):\n x_coor = x // self._cell_dim\n y_coor = y // self._cell_dim\n return (x_coor, y_coor)", "def node_to_coords(self,node_num):\n row = (node_num - 1) / self.cols\n col = (node_num - 1) % self.cols\n return (row,col)", "def _goto_piece_xy(self, row, col, adjustment_x=0, adjustment_y=0):\n self.pen.up()\n x = (self.board_lft_x + col * (self.next_square) +\n self.square_side_size * .05) + adjustment_x * self.square_side_size\n y = (self.board_top_y - row * (self.next_square) -\n self.square_side_size * .8) - adjustment_y * self.square_side_size\n self.pen.goto(x, y)", "def set_tile(self, row, col, value):\n # replace with your code\n if col < self.grid_height and row < self.grid_width:\n self.board[row][col] = value", "def set_2d_location(self, x, y):\r\n self.unif[42:44] = [x, y]", "def __check_col(self, x: int, y: int) -> bool:\n return not 
any([self.__maze[x + i, y] for i in (-1, 0, 1)])", "def getCellpos(self, event):\n e = event.widget\n cx, cy = cart(e.canvasx(event.x), e.canvasy(event.y))\n cellx = int(cx) // self.cell_width\n celly = int(cy) // self.cell_height\n return cellx, celly", "def grid_coordinate(row=None, column=None, multiplex=None):\n parts = []\n if row is not None:\n if row >= 24:\n raise NotImplementedError\n parts.append(chr(ord('A')+row))\n if column is not None:\n parts.append(str(column+1))\n if multiplex is not None:\n parts.append(\".%i\" % (multiplex+1))\n return ''.join(parts)", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value;", "def get_row_col(mouse_x, mouse_y):\n # Note: the top row is row=0 (bottom row=2), left col is col=0 (right col=2)\n spacing_x = 86 + 8\n spacing_y = 98 + 5\n top_y = 50\n left_x = 50\n return (mouse_y - top_y) // spacing_y, (mouse_x - left_x) // spacing_x", "def get_index(self, row, col):\n return (row * self.cols) + col", "def __init__(self, x, y):\n self.x = x\n self.y = y\n self.x1 = self.x + 30 # largeur et hauteur fixees\n self.y1 = self.y + 30", "def set_tile(self, row, col, value):\r\n # replace with your code\r\n self._grid_tile[row][col] = value", "def cell_from_xy(self,x,y):\n return self.cell_array.item((x,y))", "def get_coord(self,x,y,z):\n a = 0\n b = 0\n c = 0\n \n distance = 0\n \n while (distance <= x):\n distance += SQUARE_SIZE\n if ( (x - distance) > - (SQUARE_SIZE / 2) ):\n a += 1\n distance = 0\n \n while (distance <= y):\n distance += SQUARE_SIZE\n if ( (y - distance) > - (SQUARE_SIZE / 2) ):\n b += 1\n distance = 0\n \n while (distance <= z):\n distance += SQUARE_SIZE\n if ( (z - distance) > - (SQUARE_SIZE / 2) ):\n c += 1\n distance = 0\n \n return(a,b,c)", "def grid_to_mouse( pos ):\n ix,iy=pos\n px= ix*CELLSIZE + H_CELLSIZE + ix*CELLGAP\n py= iy*CELLSIZE + H_CELLSIZE + iy*CELLGAP\n return (px,py)", "def default_coord(height, width, h_offset=0, w_offset=0):\n coord_mat = np.zeros((height+h_offset, width+w_offset, 2), dtype='int')\n for i in range(height):\n for j in range(width):\n coord_mat[i, j] = (i, j)\n return coord_mat", "def placeCrate (self, crates_char, row, column):", "def print_seg_row_col(sp) : \n s, r, c = src_from_rc8x8(sp.peak_y_raw, sp.peak_x_raw)\n print('seg: %d, row: %.1f, col: %.1f' % (s, r, c))", "def chr_coords(s):\n return max_y - (max_y - min_y)*s", "def schcoords(self, canx, cany):\n # Coordinates of scheme (0,0) corner as canvas coords.\n # Scheme (0,0) is the left, bottom corner (like in mathematic,\n # not like in canvas)\n x0 = (self._cw - self.width)/2\n y0 = (self._ch - self.height)/2 + self.height\n return (canx - x0, y0 - cany)", "def to_coords(self, px, py):\n if px not in range(self.SIZE**2) or py not in range(self.SIZE**2):\n raise IndexError\n return (px // self.SIZE, py // self.SIZE,\n px % self.SIZE, py % self.SIZE)", "def set_tile(self, row, col, value):\r\n # replace with your code\r\n self._cells[row][col] = value", "def get_action_from_col(self, col):\n dumbbell_color = (col // 3) + 1\n to_block = (col % 3) + 1\n return (dumbbell_color, to_block)", "def latlon_2_grid(x, y, z, origin):\n new_y = (y - origin[1]) * 111111\n new_x = (x - origin[0]) * (111111 * np.cos(origin[1] * (np.pi/180)))\n return new_x, new_y, z", "def get_pos(self, cx, cy):\n x = self.min_x + cx*(self.size+0.5)\n y = self.min_y + cy*(self.size+0.5)\n return (x,y)", "def _place_across_col_( self, a_widget, *, columnspan, rowspan, sticky, ):\n# print( f\"_place_across_col_ row = {self.ix_row} 
col = {self.ix_col}\" )\n # defaulting should be done in place\n # if columnspan is None:\n # columnspan = 1\n\n # if rowspan is None:\n # rowspan = 1\n\n if sticky is None:\n sticky = self.sticky\n\n #rint( f\"_place_across_col_ ({self.ix_col}, {self.ix_row})\"\n # f\"columnspan = {columnspan}\" )\n\n a_widget.grid( row = self.ix_row,\n column = self.ix_col,\n columnspan = columnspan,\n rowspan = rowspan,\n sticky = sticky, )\n\n self.ix_col += columnspan\n if ( self.max > 0 ) and ( self.ix_col >= self.max ):\n print( f\"hit max row {self.max}\" )\n self.ix_row += 1\n self.ix_col = 0\n\n #print(\"_place_across_col_\", self.ix_row, self.ix_col )", "def to_position(self, x, y, i, j):\n return (x * self.SIZE + i, y * self.SIZE + j)", "def indexToPosition(self, col, row):\n columns = \"ABCDEFGH\"\n return columns[col] + str(row + 1)", "def get_pos(self):\n return [self.row, self.col]", "def Haut():\r\n X1, Y1, X2, Y2 = canvas.coords(boule)\r\n canvas.coords(boule,X1,Y1-20,X2,Y2-20)", "def world_coord(self, position, len):\n\n if len > 1:\n x_world = []\n y_world = []\n\n for item in position:\n x_world.append(self.cell_size*item[0]+self.cell_size/2-2)\n y_world.append(self.cell_size*item[1]+self.cell_size/2-6)\n\n else:\n x_world = self.cell_size*position[0]+self.cell_size/2-2\n y_world = self.cell_size*position[1]+self.cell_size/2-6\n\n\n return np.array([x_world, y_world])", "def room_xy(room, x, y, value=None):\n return room[x][y]", "def set_tile(self, row, col, value):\n # replace with your code\n self._cells[row][col] = value", "def getVerticePosition(self):\n #def getvoxelpos(model,scale,dims,translate,i,j,k): #centroid!\n return(self.X,self.Y,self.Z)", "def Pixel2World(geoMatrix, x, y):\r\n ulX = geoMatrix[0]\r\n ulY = geoMatrix[3]\r\n xdist = geoMatrix[1]\r\n ydist = geoMatrix[5]\r\n coorX = (ulX + (x * xdist))\r\n coorY = (ulY + (y * ydist))\r\n return (coorX, coorY)", "def set_square(self, col, row, value):\n row_index = row - 1\n col_index = ord(col.lower()) - 97 # ord('a') is 97\n self.state[row_index][col_index] = value" ]
[ "0.6763953", "0.67502594", "0.6700341", "0.6349989", "0.6342108", "0.62057173", "0.6079546", "0.6056177", "0.6021455", "0.6001277", "0.5997666", "0.5983044", "0.5976914", "0.5969578", "0.5954558", "0.59523946", "0.5932657", "0.5922999", "0.59033656", "0.5902169", "0.5873227", "0.5843462", "0.58274066", "0.58141875", "0.5797523", "0.57962835", "0.57886285", "0.5735973", "0.57336044", "0.57221204", "0.5705836", "0.56898695", "0.56851155", "0.5684894", "0.5679951", "0.5651203", "0.5650224", "0.56485254", "0.564451", "0.56321555", "0.56302834", "0.5623383", "0.5607311", "0.56058437", "0.55877566", "0.5587268", "0.5579469", "0.5568781", "0.55679053", "0.5565803", "0.55593264", "0.55504096", "0.55489874", "0.5544284", "0.5543026", "0.55270493", "0.55255306", "0.55240077", "0.5515869", "0.5513438", "0.5507992", "0.5507992", "0.55068004", "0.5491296", "0.54879284", "0.5479315", "0.5478321", "0.54696167", "0.5462189", "0.5460494", "0.5458293", "0.5447601", "0.54424834", "0.54391664", "0.5437217", "0.54367185", "0.54333097", "0.54236126", "0.54235685", "0.540728", "0.54030704", "0.5399862", "0.5399547", "0.5394232", "0.5390472", "0.5386199", "0.53845525", "0.53741574", "0.5371116", "0.53672665", "0.5366504", "0.5364643", "0.53645533", "0.53608185", "0.53590447", "0.535587", "0.53549397", "0.53491056", "0.5346867", "0.53451544", "0.5344551" ]
0.0
-1
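For reference, a minimal self-contained sketch of how a marker helper like `create_mark` above can be driven; the Tkinter window setup and the sample coordinates are illustrative assumptions, not part of the dataset row.

import tkinter as tk

root = tk.Tk()
canvas = tk.Canvas(root, width=300, height=200, bg="white")
canvas.pack()

def create_mark(xa, ya, col):
    # Same idea as the document above: a 4x4-pixel disc centred on (xa, ya)
    return canvas.create_oval(xa-2, ya-2, xa+2, ya+2, fill=col, outline=col)

# Place a few illustrative markers
for x, y, col in [(50, 50, "red"), (150, 100, "blue"), (250, 150, "green")]:
    create_mark(x, y, col)

root.mainloop()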
Takes a pair of coordinates (x, y) and returns the pair (a, b) such that the point represented by (x, y) lies in the cell whose top-left corner has coordinates (a, b)
def id_case(x,y):
    try:
        assert (100 < x < 1100 and 200 < y < 700)
        a = int((x-100)/cote)*cote + 100
        b = int((y-200)/cote)*cote + 200
        return (a, b)
    except AssertionError:
        print(x, y)
        print("Le couple a identifier n'est pas dans le rectangle")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _transform_point(self, x, y):\n return (x, y)", "def coord (i, j):\r\n return j, i", "def coordinates(self):", "def coordinate_from_points(pnta: Vector, pntb: Vector,\n pntc: Vector) -> Coordinate:\n pnt = pnta\n vecx = pntb - pnta\n vecxy = pntc - pnta\n return Coordinate(pnt, vecx, vecxy)", "def P_(x, y):\r\n return (x, y)", "def twoRoadConnect(data, x1, y1, x2, y2):\n flag = False\n points = [[x1, y1]]\n if not data[y1][x1] == data[y2][x2]:\n return False, []\n if YRoadConnect(data, x1, y1, x1, y2) and XRoadConnect(data, x2, y2, x1, y2) and data[y2][x1] == 0:\n flag = True\n points.append([x1, y2])\n elif XRoadConnect(data, x1, y1, x2, y1) and YRoadConnect(data, x2, y2, x2, y1) and data[y1][x2] == 0:\n flag = True\n points.append([x2, y1])\n if flag:\n data[y1][x1] = data[y2][x2] = 0\n points.append([x2, y2])\n print(data)\n print(2)\n return flag, points", "def oneRoadConnect(data, x1, y1, x2, y2):\n flag = XRoadConnect(data, x1, y1, x2, y2) or YRoadConnect(data, x1, y1, x2, y2)\n if not data[y1][x1] == data[y2][x2]:\n flag = False\n if data[y1][x1] == 0 and data[y2][x2] == 0:\n flag = False\n if flag:\n data[y1][x1] = data[y2][x2] = 0\n print(data)\n print(1)\n return flag, [[x1, y1], [x2, y2]]", "def cross_2d(origin, a, b):\r\n return geometry.gmCross2D(origin, a, b)", "def __getxyB(x, y):\n\t\treturn x*3+y", "def _maping(x,y,l,a):\n newx = (x**2 *(l* ((x**2 + y**2)**(a/2) - 1) + 2) - l * y**2 *((x**2 + y**2)**(a/2) - 1))/(x**2 + y**2) \n newy = (2 * x* y *(l* ((x**2 + y**2)**(a/2) - 1) + 1))/(x**2 + y**2)\n return newx, newy", "def transition_point(x1, y1, x2, y2):\n return (\n ((x1, y1), True) if abs(x1) > abs(x2) and abs(y1) > abs(y2)\n else ((x2, y2), False))", "def coordinate(self):\n\t\tif self.boldness_coord is None and self.price_coord is None and self.hold_coord is None:\n\t\t\treturn None\n\n\t\treturn (self.boldness_coord, self.price_coord, self.hold_coord)", "def le(self, x, y):", "def point_add(a, b, p, x0, y0, x1, y1):\n #initilise new coordinates\n xr, yr = None, None\n \n #create tuples for the input points\n p1 = (x0,y0)\n p2 = (x1,y1)\n \n #check validity of the points\n try:\n assert is_point_on_curve(a, b, p, x0, y0)\n assert is_point_on_curve(a, b, p, x1, y1)\n except:\n raise Exception('not valid points')\n\n #check curve 4a^3+27b^2 != 0 mod p.\n c0 = a.mod_pow(Bn(3),p)\n c1 = c0.mod_mul(Bn(4),p)\n c2 = b.mod_pow(Bn(2),p)\n c3 = c2.mod_mul(Bn(27),p)\n c = c1.mod_add(c3,p)\n try:\n assert c != 0\n except:\n raise Exception('invalid curve')\n \n #check if points are equal\n try:\n assert p1 != p2\n except:\n raise Exception('EC Points must not be equal') \n \n #checking the points and different cases\n if p1 == (None,None) and p2 == (None, None):\n return (None,None)\n elif (x0 == x1) and (y0.mod_add(y1,p)==0):\n return (None,None)\n elif (x0 == None or y0 == None) and (x1 != None and y1 != None):\n return p2\n elif (x1 == None or y1 == None) and (x0 != None and y0 != None):\n return p1\n \n elif y0 != None and x0 != None and y1 != None and x1 != None:\n #check if the points are valid with an additional check\n #through an exception\n try:\n assert p1 != p2\n assert p1 != (x1,(-y1))\n except:\n raise Exception('EC Points must not be equal')\n if y1 == 0:\n lam0 = -y0\n else: \n lam0 = y1.mod_sub(y0,p) \n if x1 == 0:\n lam1 = -x0\n else:\n lam1 = x1.mod_sub(x0,p)\n \n #condition check if the gradient is 0\n if lam0 == 0 or lam1 == 0:\n xr = -x0.mod_sub(x1,p)\n yr = -y1\n #check if the point is on the curve\n if xr == None or yr == None:\n return (None, 
None)\n try:\n assert is_point_on_curve(a, b, p, xr, yr)\n except:\n raise Exception('The new point is not valid')\n #do calculations on the numbers that can give valid xr,yr point \n else:\n lam2 = lam1.mod_inverse(p) \n lam = lam0.mod_mul(lam2,p)\n xr0 = lam.mod_pow(Bn(2),p)\n \n xr1 = xr0.mod_sub(x0,p)\n xr = xr1.mod_sub(x1,p)\n \n yr0 = x0.mod_sub(xr,p)\n yr1 = lam.mod_mul(yr0,p)\n yr = yr1.mod_sub(y0,p)\n #check if the new point is valid and if it is then return it\n try:\n assert is_point_on_curve(a, b, p, xr, yr)\n except:\n raise Exception('The new point is not valid')\n #check if any part is None, it may never be!\n if xr == None or yr == None:\n return (None, None)\n return (xr, yr)", "def point_double(a, b, p, x, y):\n xr, yr = None, None\n p1 = (x,y)\n #check the input point for validity\n try:\n assert is_point_on_curve(a, b, p, x, y)\n except:\n raise Exception('not a valid point')\n \n #check curve 4a^3+27b^2 != 0 mod p for validity.\n c0 = a.mod_pow(Bn(3),p)\n c1 = c0.mod_mul(Bn(4),p)\n c2 = b.mod_pow(Bn(2),p)\n c3 = c2.mod_mul(Bn(27),p)\n c = c1.mod_add(c3,p)\n try:\n assert c != 0\n except:\n raise Exception('invalid curve')\n\n #verify the input point\n if p1 == (None,None):\n return (None,None)\n elif p1 == (0,0):\n return (0,0)\n elif y == None or y == 0:\n return (None, None)\n #calculate the new point== doubled point\n else:\n if x == 0:\n xp2 = a\n else:\n xp0 = x.mod_pow(Bn(2),p)\n xp1 = xp0.mod_mul(Bn(3),p)\n xp2 = xp1.mod_add(a,p)\n \n yp0 = y.mod_mul(Bn(2),p)\n \n if yp0 != 0:\n yp = yp0.mod_inverse(p)\n else:\n yp = 0;\n if (xp2 != 0 and yp != 0):\n #calculate gradient if the points are not zero\n lam = xp2.mod_mul(yp,p)\n \n #calculate new x coordinate\n xr0 = lam.mod_pow(Bn(2),p)\n xr1 = x.mod_mul(Bn(2),p)\n xr = xr0.mod_sub(xr1,p)\n \n #calcualte new y coordinate\n yr0 = x.mod_sub(xr,p)\n yr1 = lam.mod_mul(yr0,p)\n yr = yr1.mod_sub(y,p)\n \n if (xr == None or yr == None):\n return (None, None)\n else:\n xr = -x.mod_mul(Bn(2),p)\n yr = -y\n if (xr == None or yr == None):\n return (None, None)\n \n #check whether the new point is valid whcih is passed from the previous if statement\n try:\n assert is_point_on_curve(a, b, p, x, y)\n except:\n raise Exception('The new point is not valid')\n return xr, yr", "def compute_x1_x2_points(point_a: list, point_b: list, nav: navigation.GPSComputing, logger: utility.Logger):\n\n cur_vec_dist = nav.get_distance(point_a, point_b)\n\n # check if moving vector is too small for maneuvers\n if config.MANEUVER_START_DISTANCE * 2 >= cur_vec_dist:\n msg = \"No place for maneuvers; config start maneuver distance is (that will be multiplied by 2): \" + \\\n str(config.MANEUVER_START_DISTANCE) + \" current moving vector distance is: \" + str(cur_vec_dist) + \\\n \" Given points are: \" + str(point_a) + \" \" + str(point_b)\n # print(msg)\n logger.write(msg + \"\\n\")\n return None, None\n\n point_x1 = nav.get_point_on_vector(\n point_a, point_b, config.MANEUVER_START_DISTANCE)\n point_x2 = nav.get_point_on_vector(\n point_a, point_b, cur_vec_dist - config.MANEUVER_START_DISTANCE)\n return point_x1, point_x2", "def distance_entre_deux_points(couple_points_1,couple_points_2):\r\n Xa = couple_points_1[0]\r\n Xb = couple_points_2[0]\r\n Ya = couple_points_1[1]\r\n Yb = couple_points_2[1]\r\n return math.sqrt( ( (Xb-Xa)**2) + ( (Yb-Ya)**2) )", "def _coord(self, x, y):\n gridEdge = 7 # originally 5\n y = gridEdge - y\n cx = 100 * (x - 1) + 50\n cy = 100 * (y - 1) + 50\n r = 20\n return (cx - r, cy - r, cx + r, cy + r)", "def __init__(self, 
x, y):\n self.x = x\n self.y = y\n self.x1 = self.x + 30 # largeur et hauteur fixees\n self.y1 = self.y + 30", "def latlon_2_grid(x, y, z, origin):\n new_y = (y - origin[1]) * 111111\n new_x = (x - origin[0]) * (111111 * np.cos(origin[1] * (np.pi/180)))\n return new_x, new_y, z", "def liner_cross_point(a1, b1, c1, a2, b2, c2):\n if a1 == 0 or a2 == 0:\n if a2 == 0:\n a1, b1, c1, a2, b2, c2 = a2, b2, c2, a1, b1, c1\n y = - c1 / b1\n x = - (b2 * y + c2) / a2\n elif b1 == 0 or b2 == 0:\n if b2 == 0:\n a1, b1, c1, a2, b2, c2 = a2, b2, c2, a1, b1, c1\n x = - c1 / a1\n y = - (a2 * x + c2) / b2\n else:\n a1, b1, c1 = a1 / b1, b1 / b1, c1 / b1\n a2, b2, c2 = a2 / b2, b2 / b2, c2 / b2\n x = - (c1 - c2) / (a1 - a2)\n y = - a1 * x - c1\n return x, y", "def __getxy(x1, y1, x2, y2):\n\t\treturn x1*27+y1*9+x2*3+y2", "def threeRoadConnect(data, x1, y1, x2, y2):\n temp_data = np.pad(data, (1, 1), 'constant', constant_values=0)\n # init\n points = [[x1, y1]]\n flagX = False\n flagY = False\n if not data[y1][x1] == data[y2][x2]:\n return False, []\n # Two lines parallel to the X-AXIS\n posX = 0\n for i in range(0, 18):\n if temp_data[y1 + 1][i] == 0 and temp_data[y2 + 1][i] == 0:\n if XRoadConnect(temp_data, i, y1 + 1, x1 + 1, y1 + 1) \\\n and XRoadConnect(temp_data, i, y2 + 1, x2 + 1, y2 + 1) \\\n and YRoadConnect(temp_data, i, y1 + 1, i, y2 + 1):\n flagX = True\n posX = i - 1\n if flagX:\n points.append([posX, y1])\n points.append([posX, y2])\n\n # Two lines parallel to the Y-AXIS\n posY = 0\n for i in range(0, 10):\n if temp_data[i][x1 + 1] == 0 and temp_data[i][x2 + 1] == 0:\n if YRoadConnect(temp_data, x1 + 1, i, x1 + 1, y1 + 1) \\\n and YRoadConnect(temp_data, x2 + 1, i, x2 + 1, y2 + 1) \\\n and XRoadConnect(temp_data, x1 + 1, i, x2 + 1, i):\n flagY = True\n posY = i - 1\n if flagY and flagX == False:\n points.append([x1, posY])\n points.append([x2, posY])\n\n if flagX or flagY:\n data[y1][x1] = data[y2][x2] = 0\n points.append([x2, y2])\n print(data)\n print(3)\n return flagX or flagY, points", "def test_coord_preceding_fs(self):", "def correct_point(point: Tuple[int, int]) -> Tuple[int, int]:\n corrected_x = point[0] + WIDTH_START\n corrected_y = point[1] + HEIGHT_START\n return corrected_x, corrected_y", "def _translate_coordinate(self, x1, y1, x2, y2):\n\n return (x1 + x2, y1 + y2)", "def test_coords():\n x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)\n y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)\n\n return x, y", "def point_add(self, a, b):\n\n if a.at_inf and b.at_inf: return point.inf()\n if a.at_inf: return b.dup()\n if b.at_inf: return a.dup()\n if a == b: return self.point_double(a)\n if a.x == b.x and a.y == -b.y: return point.inf()\n\n x1, y1, x2, y2 = modp(self.p, a.x, a.y, b.x, b.y)\n L = (y2 - y1) / (x2 - x1)\n x3 = L ** 2 - x1 - x2\n y3 = L * (x1 - x3) - y1\n return point.xy(int(x3), int(y3))", "def get_pos(x, y):\r\n return normalize(x) // 2, normalize(y) // 4", "def update_coords(self, l, b):\n self.l = l\n self.b = b\n self.ra, self.dec = astLib.astCoords.convertCoords(\n \"GALACTIC\", \"J2000\", self.l, self.b, epoch=2000.)", "def getPoint(self, a):\n lng = self.source.center.lng + (self.target.center.lng - self.source.center.lng) * min(a, 1)\n lat = self.source.center.lat + (self.target.center.lat - self.source.center.lat) * min(a, 1)\n return lng, lat", "def corner_pos(self, connection1, connection2, lastConnection):\n d1=(connection1.this.pos - connection1.other.pos).normalize()\n d2=(connection2.other.pos - 
connection1.other.pos).normalize()\n w1=self.get_width(connection1,connection1.other)/2\n w2=self.get_width(connection2,connection2.this)/2\n if abs(d1.dot(d2) +1) < 0.0001:\n b=0\n # catch case when it is the end of a single rod\n elif abs(d1.dot(d2) -1) < 0.0001:\n b=0\n return [[w2*rotate(d1,90), w2*rotate(d1,-90)], d1.cross(d2)[2]]\n else:\n if (d1[1]*d2[0]-d1[0]*d2[1])==0:\n raise ValueError(\"connections in the same place\"+str(connection1.this.pos )+\" \"+str(connection1.other.pos)+\" \"+str(connection2.other.pos))\n b = (d2[0]*d1[0]*w2 + w1*d1[0]**2 + w1*d1[1]**2 + w2*d1[1]*d2[1]) / (d1[1]*d2[0]-d1[0]*d2[1])\n# rotate direction can be correct if connection1 &2 are always in same rotational order\n return [ (b*d1 + w2*rotate(d1,90)), d1.cross(d2)[2] ]", "def xytors(x,y):\n\n from math import sqrt\n\n L1 = (sqrt(3.0)*y + 1.0)/3.0\n L2 = (-3.0*x - sqrt(3.0)*y + 2.0)/6.0\n L3 = ( 3.0*x - sqrt(3.0)*y + 2.0)/6.0\n\n r = -L2 + L3 - L1\n s = -L2 - L3 + L1\n\n return r,s", "def d2(x0,y0,x1,y1):\n return (x0-x1)*(x0-x1) + (y0-y1)*(y0-y1)", "def compute_x1_x2_int_points(point_a: list, point_b: list, nav: navigation.GPSComputing, logger: utility.Logger):\n\n cur_vec_dist = nav.get_distance(point_a, point_b)\n\n # check if moving vector is too small for maneuvers\n if config.SPIRAL_SIDES_INTERVAL * 2 >= cur_vec_dist:\n msg = \"No place for maneuvers; Config spiral interval (that will be multiplied by 2): \" + \\\n str(config.SPIRAL_SIDES_INTERVAL) + \" Current moving vector distance is: \" + str(cur_vec_dist) + \\\n \" Given points are: \" + str(point_a) + \" \" + str(point_b)\n if config.VERBOSE:\n print(msg)\n logger.write(msg + \"\\n\")\n return None, None\n\n point_x1_int = nav.get_point_on_vector(\n point_a, point_b, config.SPIRAL_SIDES_INTERVAL)\n point_x2_int = nav.get_point_on_vector(\n point_a, point_b, cur_vec_dist - config.SPIRAL_SIDES_INTERVAL)\n return point_x1_int, point_x2_int", "def afficher_points_2D(set_points):\n X, Y = [p[0][0] for p in set_points], [p[0][1] for p in set_points]\n return(X, Y)", "def __init__(self, coord1, coord2, coordtype, gain, tobs):\n\n if coordtype not in ['eq', 'gal']:\n raise CoordinateException('Wrong coordtype passed to Pointing')\n\n if coordtype == 'eq':\n # assume pointings in decimal degrees\n ra = coord1\n dec = coord2\n\n # convert to l and b :)\n gl,gb = go.radec_to_lb(ra, dec)\n\n if gl>180.:\n gl -= 360.\n\n self.gl = gl\n self.gb = gb\n else:\n if coord1>180.:\n coord1 -= 360.\n\n self.gl = coord1\n self.gb = coord2\n self.tobs = tobs \n self.gain = gain", "def convert_coords(x, y, conversion):\n if conversion == \"cartesian\" :\n # convert to cartesian plane coordinates \n x_new = x - (width/2)\n y_new = (height/2) + y \n\n elif conversion == \"pygame\":\n # only needed to place images in pygame\n x_new = x + (width/2)\n y_new = (height/2) - y\n \n return x_new, y_new", "def corners((u,v)):\r\n return ((u+1,v+1), (u+1,v), (u,v), (u,v+1))", "def xy(self) -> Tuple[float, float]:\n return (self.x, self.y)", "def coords2D(self):\n return (self.x, self.y)", "def pyr_point_translator(x, y, org_l, dest_l):\n dest_x = (2.0 ** (org_l - dest_l)) * x\n dest_y = (2.0 ** (org_l - dest_l)) * y\n return np.array([dest_x, dest_y]).transpose()", "def at_b (self):\n self.argc = int((len(n.coord[0]))/2)\n self.pts_con = np.array(self.coord[:,self.argc:len(n.coord[0])])\n\n self.xd = self.xdi\n self.zd = self.zdi \n \n for i, x in enumerate(self.xdi):\n self.aux_con = self.pts_con[0] - x \n self.arg1 = np.argmin(abs(self.aux_con)) \n \n if 
(self.aux_con[self.arg1] < 0 and self.arg1 == 0) or (self.aux_con[self.arg1] > 0 and self.arg1 == len(self.aux_con)-1):\n self.yd[i] = 99999.\n #print(self.yd[i],self.arg1)\n #print(self.aux_con)\n \n elif (self.aux_con[self.arg1] > 0 and self.aux_con[self.arg1+1] > self.aux_con[self.arg1]): #(self.aux_con[self.arg1] < 0 and self.aux_con[self.arg1-1] > self.aux_con[self.arg1]) or \n self.yd[i] = 99999.\n #print(self.yd[i],self.arg1)\n #print(self.aux_con)\n \n elif self.aux_con[self.arg1] < 0:\n #print(self.arg1)\n self.arg1 = self.arg1 + self.argc\n self.arg2 = self.arg1 - 1\n self.yd[i] = self.coord[1,n.arg1] + (x-self.coord[0,n.arg1])*(self.coord[1,n.arg2]-self.coord[1,n.arg1])/(self.coord[0,n.arg2]-self.coord[0,n.arg1])\n #print(self.yd[i],self.arg1,self.arg2)\n #print(self.aux_con)\n\n elif self.aux_con[self.arg1] > 0:\n #print(self.arg1) \n self.arg1 = self.arg1 + self.argc\n self.arg2 = self.arg1 + 1\n self.yd[i] = self.coord[1,n.arg1] + (x-self.coord[0,n.arg1])*(self.coord[1,n.arg2]-self.coord[1,n.arg1])/(self.coord[0,n.arg2]-self.coord[0,n.arg1]) \n #print(self.yd[i],self.arg1,self.arg2)\n #print(self.aux_con)\n \n #print('Defensa {0}\\n{1}: {2}\\n{3}: {4}'.format(i,self.arg1,self.aux_con[self.arg1],self.arg2,self.aux_con[self.arg2])) \n \n #self.yd = self.yd\n self.b = np.array([self.xd,self.yd,self.zd])\n #self.b.loc[:,('y')] = self.b.loc[:,('y')] ", "def sumito_three_to_two(context: GUI, old_coordinates, new_coordinates, enemy_start_coordinates, enemy_end_coordinates):\n current_piece = context.board.board_dict[old_coordinates[0]].piece\n enemy_piece = context.board.board_dict[enemy_start_coordinates[0]].piece\n empty_coordinates = [coord for coord in old_coordinates if coord not in new_coordinates]\n\n for coord in enemy_end_coordinates:\n context.board.board_dict[coord].piece = enemy_piece\n\n for coord in new_coordinates:\n context.board.board_dict[coord].piece = current_piece\n\n for coord in empty_coordinates:\n context.board.board_dict[coord].piece = None", "def composite(c, r):\n x, y = gta * (c, r)\n lat, lon = transform.TransformPoint(x, y)[:2]\n if not -90 <= lat <= 90:\n raise ValueError('illegal lat value, did you switch coordinates')\n return lon, lat", "def get_point_on(self, s):\n\n x = self.n1.x * (1 - s) + self.n2.x * s\n y = self.n1.y * (1 - s) + self.n2.y * s\n z = self.n1.z * (1 - s) + self.n2.z * s\n\n return [x, y, z]", "def from_pts(one, two):\n\t\treturn Vec2(two[0] - one[0], two[1] - one[1])", "def transform(self, ((a, b), (c, d))=((1, 1), (-1, 1)), aligned_with_grid=False):\n (x0, y0), (x1, y1) = self.vertices\n return type(self)((int(a * x0 + c * y0), int(b * x0 + d * y0)),\n (int(a * x1 + c * y1), int(b * x1 + d * y1)),\n aligned_with_grid=aligned_with_grid)", "def __init__(self, a, b):\n\t\tself.a = a\n\t\tself.b = b\n\t\tself.left = min(self.a[0],self.b[0])\n\t\tself.right = max(self.a[0],self.b[0])\n\t\tself.top = min(self.a[1],self.b[1])\n\t\tself.bottom = max(self.a[1],self.b[1])\t\n\t\tself.points = (\n\t\t\tVector2d(self.left, self.top),\n\t\t\tVector2d(self.right, self.top),\n\t\t\tVector2d(self.right, self.bottom),\n\t\t\tVector2d(self.left, self.bottom)\n\t\t)", "def connection_point(x, f, p):\n y = collision_point(x, f, p)\n if not p(y):\n return y\n return convergent_point(x, f(y), f)", "def get(self):\n return (self.x,self.y);", "def get_intersection_point(l1, l2):\n m, b = l1\n n, c = l2\n # Find when mx + b = nx + c\n # mx - nx = c - b\n # And...\n x = (c-b) / (m-n)\n # Then plug back in\n y = m*x + b\n return (x, y)", "def 
points_at_x(self, x):\n x, = modp(self.p, x)\n rhs = x ** 3 + x * self.a + self.b\n y = rhs.sqrt()\n return point.xy(int(x), int(y)), point.xy(int(x), -int(y))", "def __one_both_open(x, y, c = None, l = None):\n return x - 1, y - 1", "def convergent_point_guarded(x0, x1, y, f):\n d0 = distance(x0, y, f)\n d1 = distance(x1, y, f)\n if d0 < d1:\n x1 = power_unary(x1, d1 - d0, f)\n elif d1 < d0:\n x0 = power_unary(x0, d0 - d1, f)\n return convergent_point(x0, x1, f)", "def dist_to_point(self, point):\n\t\treturn dist_to_line2d_seg((self.a.to_tuple(),self.b.to_tuple()), point.to_tuple())", "def coordinate_pairs(lat_axis, lon_axis):\n \n lon_mesh, lat_mesh = numpy.meshgrid(lon_axis, lat_axis) # This is the correct order\n \n return lat_mesh.flatten(), lon_mesh.flatten()", "def __randomize_coord((ref_x, ref_y)):\n radius = numpy.random.normal(scale=DataGen.stdev_distance)\n angle = random.uniform(0, 2 * math.pi)\n rand_x = ref_x + radius * math.cos(angle)\n rand_y = ref_y + radius * math.sin(angle)\n return rand_x, rand_y", "def validate_points(a, b):\r\n\tdiff_y = b[0] - a[0]\r\n\tdiff_x = b[1] - a[1]\r\n\r\n\treturn (diff_y == 0 and diff_x != 0) or (diff_x == 0 and diff_y != 0) or abs(diff_x) == abs(diff_y)", "def dof_1r_to_point(link, center, from_pt, to_pt, axis, axis_1, axis_2):\n\n return", "def test_point_relations(p1, p2):\n assert p1.left_of(p2) or p1.x >= p2.x\n assert p1.is_right_of(p2) or p1.x <= p2.x\n\n assert p1.left_of(p2) == p2.is_right_of(p1) or p1.x == p2.x\n assert not p1.left_of(p2) or not p1.is_right_of(p2)\n assert not p2.left_of(p1) or not p2.is_right_of(p1)", "def _increase_coordinates(coordinates, x, y):\n orig_x, orig_y = coordinates[\"x\"], coordinates[\"y\"]\n coordinates[\"x\"], coordinates[\"y\"] = orig_x + x, orig_y + y", "def __init__(self, x, y):\r\n self.x=x\r\n self.y=y", "def as_point(self):\n return round(self.x), round(self.y)", "def calc_point_squre_dist(point_a, point_b):\n distx = point_a[0] - point_b[0]\n disty = point_a[1] - point_b[1]\n return distx ** 2 + disty ** 2", "def hasher((x1, y1), (x0, y0)=(0,0)):\n return _point_hash((x0, y0)) ^ _point_hash((x0, y1)) ^ _point_hash((x1, y0)) ^ _point_hash((x1, y1))", "def test_coords_same_direction(self): # test_change_coords = method\n mi = (0,1,1.5708)\n mj = (0,2,1.5708)\n result = new_mj_coords(mi, mj)\n self.assertEqual(result, (0.3317021649341794, 0.9433841602327115, 0.0))\n\n '''\n the method .assertEqual(a,b) is equivalent to a == b\n other methods include: .assertIs(a,b) = a is b, .assertIsNone(x) = x is None,\n .assertIn(a,b) = a in b, and .assertIsInstance(a,b) = isinstance(a, b)\n\n\n '''", "def __init__(self, x, y, u):\n self.x = x\n self.y = y\n self.u = u", "def _get_pt_tuple(pnt1, pnt2):\n return tuple(map(_map_x_dim(tuple(pnt1)), pnt2))", "def _barycentricXY(a,p1,p2,p3):\n x=a[0]\n y=a[1]\n\n x1=p1[0]\n y1=p1[1]\n\n x2=p2[0]\n y2=p2[1]\n\n x3=p3[0]\n y3=p3[1]\n\n denom=(y2-y3)*(x1-x3) + (x3-x2)*(y1-y3)\n\n if abs(denom) < epsilon:\n raise ValueError('degenerate triangle in barycentricXY call')\n \n lam1 = ((y2-y3)*(x-x3) + (x3-x2)*(y-y3))/denom\n lam2 = ((y3-y1)*(x-x3) + (x1-x3)*(y-y3))/denom\n lam3 = 1.0-(lam1+lam2)\n\n return [lam1,lam2,lam3]", "def point_double(self, a):\n x1, y1 = modp(self.p, a.x, a.y)\n L = (3 * x1 ** 2 + self.a) / (2 * y1)\n x3 = L ** 2 - 2 * x1\n y3 = L * (x1 - x3) - y1\n return point.xy(int(x3), int(y3))", "def _middle_point(p1, p2):\n x = int((p1.x + p2.x) / 2)\n y = int((p1.y + p2.y) / 2)\n return (x, y)", "def to_cartesian(self):\n w = 1.73205 # sqrt(3)\n h = 2\n 
dx = 0.5 * w if self.y % 2 == 1 else 0\n x = 0.5 * w + self.x * w + dx\n y = 0.5 * h + 0.75 * self.y * h\n return (x, y)", "def __init__(self, x: int, y: int, w: int, h: int):\n self.x1 = x\n self.y1 = y\n self.x2 = x + w\n self.y2 = y + h", "def point_location(tri, p): \n simplex_index = tri.find_simplex(p)\n bc = []\n for id_, point in zip(simplex_index, p):\n # Calculate the two first barycentric coordinates for the relevant\n # simplex\n b = tri.transform[id_, :2].dot(point-tri.transform[id_, 2])\n bc.append(np.c_[np.atleast_2d(b), 1-b.sum()])\n # Create the full array and squeeze the shit out of it\n bc = np.array(bc).squeeze()\n return simplex_index, bc", "def secondorder(self):\n f = self.img\n x = self.x\n y = self.y\n self.x2 = sum(f*x**2)/sum(f) - self.x1**2\n self.y2 = sum(f*y**2)/sum(f) - self.y1**2\n self.xy = sum(f*x*y)/sum(f) - self.x1*self.y1", "def single_point_crossover(a: np.ndarray, b: np.ndarray, point: int) -> Tuple[np.ndarray, np.ndarray]:\n\n new_a = np.concatenate((a[:point+1], b[point+1:]))\n new_b = np.concatenate((b[:point+1], a[point+1:]))\n return new_a, new_b", "def coordinates(self) -> Tuple[int, int]:\n return self.x, self.y", "def _get_surrounding(self, x, y):\n coords = (\n (x, y - 1),\n (x + 1, y),\n (x, y + 1),\n (x - 1, y),\n )\n return filter(lambda i: bool(i[0]), [\n (self._get_none(a, b), (a, b))\n for a, b in coords\n ])", "def line_param(point_a, point_b, t):\n new_point = point_a - point_b\n return point_b + t*new_point", "def coords_sub(cell_one, cell_two):\n row_diff = cell_one[0] - cell_two[0]\n col_diff = cell_one[1] - cell_two[1]\n return row_diff, col_diff", "def set_points(self, pt1, pt2):\n (x1, y1) = pt1.as_tuple()\n (x2, y2) = pt2.as_tuple()\n self.left = min(x1, x2)\n self.top = min(y1, y2)\n self.right = max(x1, x2)\n self.bottom = max(y1, y2)", "def __init__(self, x0, y0, x1, y1):\n\n self.x0 = x0\n self.y0 = y0\n self.x1 = x1\n self.y1 = y1", "def get_id_from_coor(self, x, y):\n x_coor = x // self._cell_dim\n y_coor = y // self._cell_dim\n return (x_coor, y_coor)", "def __init__(self, x1, y1):\n self.x = x1\n self.y = y1", "def getCoord(self):\n return (self.birth, self.death)", "def _coord(xend, yend):\n x = np.random.randint(0, xend)\n y = np.random.randint(0, yend)\n return x, y", "def XRoadConnect(data, x1, y1, x2, y2):\n flag = True\n if not y1 == y2:\n return False\n x_start = min(x1, x2)\n x_end = max(x1, x2)\n for i in range(x_start + 1, x_end):\n if not data[y1][i] == 0:\n flag = False\n break\n return flag", "def cross(o, a, b):\r\n xo, yo = o\r\n xa, ya = a\r\n xb, yb = b\r\n return (xa - xo)*(yb - yo) - (ya - yo)*(xb - xo)", "def extra_coords(self) -> ExtraCoordsABC:", "def ED(X,Y):", "def pointfind2(plat, plon, lat, lon, pdif=1):\n\n\tdist_min = 1000000.\n\t\n\t\n\tfor i in range(lon.shape[0]):\n\t\tfor j in range(lon.shape[1]):\n\t\t\tdist = Ngl.gc_dist(plat,plon,lat[i,j],lon[i,j])\n\t\t\tif dist_min > dist:\n\t\t\t\tdist_min = dist\n\t\t\t\ti_min = i\n\t\t\t\tj_min = j\n\t\t\t\tlat_min = lat[i,j]\n\t\t\t\tlon_min = lon[i,j]\n\t\n\tprint(i_min,j_min,lat_min,lon_min)\n\tgg1 = i_min, j_min\n\t\n\treturn(gg1, lat_min, lon_min)", "def distance(coord_a: tuple or list, coord_b: tuple or list):\n return math.hypot(coord_b[0] - coord_a[0], coord_b[1] - coord_a[1])", "def _fix_coords(x, y):\n if x.ndim != 1 or all(x < x[0]): # skip 2D arrays and monotonic backwards data\n return x, y\n lon1 = x[0]\n filter_ = x < lon1\n while filter_.sum():\n filter_ = x < lon1\n x[filter_] += 360\n return x, y", "def get_line_to(self, 
point):\n\n b = ((self.x - point.x)*point.y - (self.y - point.y)*point.x)/(self.x - point.x)\n\n a = (self.y - point.y)/(self.x - point.x)\n\n return a, b", "def addpoint(x, y, xnew, ynew, nVertices2):\n xnew[nVertices2] = x\n ynew[nVertices2] = y\n# print('in add point',nVertices2,xnew[nVertices2],ynew[nVertices2],x,y)\n nVertices2 = nVertices2 + 1\n\n return nVertices2", "def relativize_coordinates(self):\n if len(self.nodes) + len(self.connecting) < 1:\n return\n smallest_c = (self.nodes+self.connecting)[0].c\n for node in self.nodes+self.connecting:\n if node.c < smallest_c:\n smallest_c = node.c\n for node in self.nodes+self.connecting:\n node.c = node.c - smallest_c", "def generate_interpolated_points(point1, point2):\n points = connect(np.array([point2, point1]))\n return set(map(tuple, points))", "def vectorize(point_a:tuple, point_b:tuple)->tuple:\n return (point_b[0] - point_a[0], point_b[1] - point_a[1])", "def YRoadConnect(data, x1, y1, x2, y2):\n flag = True\n if not x1 == x2:\n return False\n y_start = min(y1, y2)\n y_end = max(y1, y2)\n for i in range(y_start + 1, y_end):\n if not data[i][x1] == 0:\n flag = False\n break\n return flag" ]
[ "0.64014715", "0.6380654", "0.62387294", "0.61907715", "0.61892194", "0.6171595", "0.6140475", "0.61289203", "0.6101151", "0.60843295", "0.60767776", "0.602639", "0.6021461", "0.6011172", "0.6002909", "0.5987593", "0.59744483", "0.5946929", "0.59399027", "0.5933556", "0.59327227", "0.59287393", "0.592009", "0.5910652", "0.59078085", "0.5890872", "0.5884876", "0.5879267", "0.58762395", "0.58650625", "0.5817119", "0.5817095", "0.57932705", "0.5790487", "0.57823205", "0.5777128", "0.57743204", "0.5762437", "0.575639", "0.57407576", "0.57373923", "0.5725548", "0.5706348", "0.5705788", "0.56999874", "0.5681217", "0.56804705", "0.56736016", "0.56633157", "0.5661928", "0.56609124", "0.56568944", "0.5654501", "0.56414443", "0.5634512", "0.5632255", "0.56313", "0.5623978", "0.5617022", "0.5616833", "0.56112784", "0.56096226", "0.5609402", "0.5598806", "0.55978745", "0.55932903", "0.559315", "0.557636", "0.55756795", "0.557466", "0.5563618", "0.55628026", "0.55486226", "0.55453175", "0.55410564", "0.55390257", "0.553831", "0.55380195", "0.5530706", "0.55255896", "0.55244994", "0.5521777", "0.552007", "0.55195916", "0.5512676", "0.551203", "0.5502433", "0.5500608", "0.549974", "0.5498804", "0.54981744", "0.5497565", "0.549488", "0.54932934", "0.54890656", "0.5488848", "0.54885346", "0.5488182", "0.54834014", "0.54831463" ]
0.57720083
37
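As a quick sanity check of the cell-lookup logic in `id_case` above, here is a self-contained sketch with an assumed cell size `cote`; the real value of `cote` is defined elsewhere in the source program.

cote = 50  # assumed cell side length, for illustration only

def id_case(x, y):
    # Same computation as the document above, without the bounds check
    a = int((x - 100) / cote) * cote + 100
    b = int((y - 200) / cote) * cote + 200
    return (a, b)

print(id_case(137, 263))   # -> (100, 250): cell spanning x in [100,150) and y in [250,300)
print(id_case(1099, 699))  # -> (1050, 650): last cell of the rectangle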
Function that returns the discs that directly touch a given disc. Takes a disc as argument. Returns the list of discs in contact with it, or an empty list if none.
def C(v,securite):
    to_return = set()
    x,y = l[v][0],l[v][1]
    a,b = id_case(x,y)            # get the cell containing the disc being tested
    voisinage = set(cases[a,b])   # get the neighbourhood list (not necessarily in contact)
    # Cell numbering around the current cell (#4):
    #  012
    #  345
    #  678
    if a>100:
        voisinage = add_list(voisinage,cases[a-4*rayon,b])              #3
        if b>200:
            voisinage = add_list(voisinage,cases[a-4*rayon,b-4*rayon])  #0
            voisinage = add_list(voisinage,cases[a,b-4*rayon])          #1
        if b<600:
            voisinage = add_list(voisinage,cases[a-4*rayon,b+4*rayon])  #6
            voisinage = add_list(voisinage,cases[a,b+4*rayon])          #7
    if a<1100-4*rayon:
        voisinage = add_list(voisinage,cases[a+4*rayon,b])              #5
        if b>200:
            voisinage = add_list(voisinage,cases[a+4*rayon,b-4*rayon])  #2
            voisinage = add_list(voisinage,cases[a,b-4*rayon])          #1
        if b<600:
            voisinage = add_list(voisinage,cases[a+4*rayon,b+4*rayon])  #8
            voisinage = add_list(voisinage,cases[a,b+4*rayon])          #7
    # Adding the same element to a set several times is harmless
    for i in voisinage:
        xb,yb = l[i][0],l[i][1]
        if 0<sqrt((x-xb)**2+(y-yb)**2)<=2*rayon+securite:
            to_return.add(i)
    return to_return
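A simplified, self-contained sketch of the same cell-bucketing contact search; the radius, cell size, sample coordinates and helper names below are illustrative assumptions (the original relies on the globals `l`, `cases`, `rayon` and `add_list`):

from math import sqrt

rayon = 10                  # assumed disc radius
cell = 4 * rayon            # assumed cell size; the document steps between cells by 4*rayon
discs = {0: (105, 210), 1: (118, 214), 2: (400, 400)}   # id -> (x, y), illustrative data

def cell_of(x, y):
    return (int(x // cell) * cell, int(y // cell) * cell)

# Bucket the discs by cell
cases = {}
for i, (x, y) in discs.items():
    cases.setdefault(cell_of(x, y), set()).add(i)

def contacts(v, securite):
    x, y = discs[v]
    a, b = cell_of(x, y)
    found = set()
    for da in (-cell, 0, cell):          # scan the 3x3 neighbourhood of the disc's cell
        for db in (-cell, 0, cell):
            for j in cases.get((a + da, b + db), ()):
                xb, yb = discs[j]
                if 0 < sqrt((x - xb)**2 + (y - yb)**2) <= 2*rayon + securite:
                    found.add(j)
    return found

print(contacts(0, securite=0))   # -> {1}: disc 1 lies within 2*rayon of disc 0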
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def receiveContactList(self, contactList):", "def controlList(user):\n insListe = Inserito.objects.filter(user=user, cancellato=False).select_related('listaAttesa')\n for l in insListe:\n numPosti = l.listaAttesa.corso.cap - l.listaAttesa.corso.posti_prenotati\n if numPosti > 0:\n testo = \"Si è liberato un posto per il corso \"+l.listaAttesa.corso.nome+ \" che si tiene il \"+ str(l.listaAttesa.corso.data) + \" alle \"+ str(l.listaAttesa.corso.ora_inizio)\n noreply = User.objects.get(username='noreply')\n notifica = Messaggio(userMittente=noreply, userDestinatario=user, data_ora=datetime.datetime.today(), text=testo)\n checkEx = Messaggio.objects.filter(userMittente=noreply, userDestinatario=user, text=testo).exists()\n if not checkEx:\n notifica.save()\n changeListRecord = Inserito.objects.get(user=user, listaAttesa=l.listaAttesa)\n changeListRecord.cancellato = True\n changeListRecord.save()", "def enchere(self):\n\n i = 0\n while i < 5 and self.annonce < 4:\n paroleJ = self.joueurs[i].parler(self.annonce)\n if paroleJ != 0:\n self.annonce = paroleJ\n self.indiceJoueurQuiPrend = i\n i += 1\n\n print(\"joueur qui prend : \" + str(self.indiceJoueurQuiPrend))\n if self.indiceJoueurQuiPrend != -1:\n print(\"annonce : \" + str(self.annonce))\n if self.annonce == 1 or self.annonce == 2:\n self.joueurs[self.indiceJoueurQuiPrend].possedeChien = True\n self.joueurs[self.indiceJoueurQuiPrend].construireChien()\n self.debuterPartie()\n\n else:\n self.finirPartie()", "def scraper_notizie(self, contenuto_articoli: list):\n tot_menzioni = []\n for articolo in contenuto_articoli:\n # estraggo qualsisasi frase che menziona il giocatore\n sel_regex = f\"[\\w ,;()'’-]+{self.name}[\\w ,;()'’-]+\"\n results = re.findall(sel_regex, articolo)\n\n for res in results:\n # rimuovo il caso in cui sia solo in un elenco, come ad inizio articoli su ATTACCO\n if not re.search(f\", {self.name},\", res):\n tot_menzioni.append(res)\n if len(tot_menzioni) > 0:\n self.news = \"• \" + \"<br>•\".join(tot_menzioni)", "def on_btnLista_clicked(self,guardar):\n XerarInformes()", "def afficher(self):\n bordRect = (self.pos[0]-5, self.pos[1]-5, self.dim[0]+5, self.dim[1]+5)\n Fond = pygame.draw.rect(self.ecran.surface, self.ecran.couleur, bordRect, 0) # Efface le precedant text\n\n rang = 0\n verif = \"\"\n compteur = 0\n self.lignes = []\n if self.txt == \"\": self.txt = \" \"\n \n while verif != self.txt:\n verif =\"\"\n rang += self.correction(self.txt[rang:], compteur)\n compteur += 1\n for k in self.lignes:\n verif += k.txt\n\n for compteur in range(len(self.lignes)):\n self.lignes[compteur].afficher()\n\n self.dim = (self.dim[0], self.hLigne*(compteur+1)) # +1 -> Boucle for\n \n pygame.display.flip()", "def retireSommet(self, sommet):\r\n nouveauGraphe = copy.deepcopy(self) # on effectue une copie du graphe\r\n nouveauGraphe.n = self.n-1 # On a n-1 points\r\n # NB: il faut aussi changer m et listeArretes mais on va pas le faire tout de suite car pas urgent\r\n # 1. On suprrime la ligne d'indice sommet\r\n #* AUTRE MÉTHODE del nouveauGraphe.adjMatrix[sommet]\r\n # print(nouveauGraphe.adjMatrix)\r\n nouveauGraphe.adjMatrix.pop(sommet)\r\n # print(nouveauGraphe.adjMatrix)\r\n #2. 
On supprime la colonne d'indice sommet = on supprime l'index sommet de chaque sous liste\r\n # la liste comprehension ne marche pas bien :(\r\n for line in nouveauGraphe.adjMatrix:\r\n line.pop(sommet)\r\n # print(nouveauGraphe.adjMatrix)\r\n # nouveauGraphe.m = 0\r\n # 2ème méthode:\r\n # for ligne in nouveauGraphe.adjMatrix:\r\n # ligne.pop(sommet)\r\n return nouveauGraphe", "def modification_de_couleur(var,y):\n for i in range(len(Couleurs)): #Permet de savoir à quel valeur\n if L[i] == var: #de la liste la personne est en\n break #arrivant.\n while True:\n rectangle(410,y,437,y+17,'black','#A9A9A9')\n rectangle(410,y+23,437,y+40,'black','#A9A9A9')\n fleche(424,y+20,424,y+5,'black',2)\n fleche(424,y+30,424,y+35,'black',2)\n x2,y2,z2=attente_clic()\n if 410<=x2<=437:\n if y<=y2<=y+17:\n i += 1\n elif y+23<=y2<=y+40:\n i -=1\n else:\n return Couleurs[i],Couleurs2[i]\n if i >= 12:\n i = 0\n if i < 0:\n i = 11\n cercle(100,120,10,Couleurs[i],Couleurs2[i])\n cercle(120,120,10,Couleurs[i],Couleurs2[i])\n cercle(140,120,10,Couleurs[i],Couleurs2[i])", "def chercherChemin(self):\n\n \n liste=self._circuit.vue(self.x,self.y,self.rayonVision)\n \n listeSuppr=[]\n couche_vehicule= self._circuit.Couche_vehicules\n \n for case in liste :\n #on élimine les cases infranchissbles les cases qui ne sont pas sur le chemin à suivre \n\n if self._circuit.numeroWayPoint(case[0],case[1])==0 or ( self._circuit.numeroWayPoint(self.x,self.y)!=self._circuit.lastWayPoint and self._circuit.numeroWayPoint(case[0],case[1])<= self._circuit.numeroWayPoint(self.x,self.y)) or( self._circuit.numeroWayPoint(case[0],case[1])>= 5*self._circuit.numeroWayPoint(self.x,self.y) and self._circuit.numeroWayPoint(self.x,self.y)!=0) or ( self._circuit.numeroWayPoint(self.x,self.y)==self._circuit.lastWayPoint and self._circuit.numeroWayPoint(case[0],case[1])== self._circuit.numeroWayPoint(self.x,self.y)) or self._circuit.plateau[case[1],case[0],couche_vehicule]!=None:#on élimine les points derrière\n \n listeSuppr.append(case)\n\n \n for case in listeSuppr:\n \n liste.remove(case)\n \n if len(liste)>=1:\n l=liste[0]\n\n for nour in liste :\n \n if distance((self.x,self.y),(l[0],l[1])) > distance((self.x,self.y),(nour[0],nour[1])):\n l=nour\n pasx=0\n pasy=0\n if self.x<l[0] : \n pasx=1\n elif self.x>l[0] :\n pasx=-1\n if self.y<l[1] : \n pasy=1\n elif self.y>l[1] :\n pasy=-1\n debug.dprint(\" id {} {}:({},{}) Waypoint {} Point:({},{}) WayPoint {} vitesse :{} reservoir:{}\".format(self.id,self.typeV,self.x,self.y,self._circuit.numeroWayPoint(self.x,self.y),l[0],l[1],self._circuit.numeroWayPoint(l[0],l[1]),self.vitesse,self.reservoir))\n self.orientation=atan2(pasy,pasx)\n\n self.vitesse=1\n\n debug.dprint(self) \n \n super().deplacer()\n \n\n self.rayonVision=4\n else :# on augemente le rayon de vision au cas ou toutes les cases sont occupées ou non franchissables\n self.rayonVision*=3", "def modifier_classement_joueur_tournoi(self, joueurs_tournoi, championnat, rapport):\r\n rapport.affichage_classement_championnat(championnat)\r\n championnat = sorted(championnat, key=lambda x: x.classement) # tri joueurs du championnat par classement\r\n print(\"Veuillez indiquer le numéro du joueur à modifier:\")\r\n choix = int(input())\r\n if choix <= len(championnat): # test si choix numero joueur valide\r\n index = choix - 1 # car liste commence a 0\r\n joueur = championnat[index]\r\n nouveau_joueur = copy.deepcopy(joueur)\r\n print(\"Veuillez indiquer le nouveau classement de \" + joueur.nom)\r\n nouveau_classement = int(input())\r\n 
nouveau_joueur.classement = nouveau_classement\r\n championnat.remove(joueur) # enleve ancienne position du joueur dans classement\r\n joueurs_tournoi.remove(joueur) # enleve ancienne position du joueur dans tournoi\r\n championnat.append(nouveau_joueur) # ajoute joueur avec classement actualise\r\n joueurs_tournoi.append(nouveau_joueur) # ajoute joueur classement actualise dans liste participants tournoi\r\n return joueurs_tournoi, championnat\r\n else:\r\n print(\"Numero joueur invalide\")\r\n return", "def __calcular_mejores_disparos(self, participantes):\n mejores_disparos = []\n for participante in participantes:\n mejor_puntaje = 999\n for disparo in participante['disparos']:\n if disparo < mejor_puntaje:\n mejor_puntaje = disparo\n participante['mejor_disparo'] = mejor_puntaje\n mejores_disparos.append(participante)\n return mejores_disparos", "def contactListClicked(self):\n \n contacts = self.userList.getSelectedItems()\n self.mergeButton.setEnabled(contacts != None and len(contacts) > 1)\n \n if contacts != None and len(contacts) == 1:\n self.messageList.filterByContact(contacts[0])\n else:\n self.messageList.removeFilter()", "def callNurse():\n triageRoom.append(waitingRoom.pop(0))\n #sort(triageRoom,key=patient.triageNumer)", "def test_listes():\n listes = [Liste(mot) for mot in (\"SE\", \"PAS\", \"DE\", \"DEVIS\")]\n data_tycat(listes)\n _ = input()\n print(\"on ajoute listes[0] apres liste[1], puis un mot vide\")\n listes[1].suffixe(listes[0])\n listes[1].suffixe(Liste(\"\"))\n data_tycat(listes)\n _ = input()\n print(\"on ajoute listes[1] apres listes[2] et listes[0] apres listes[3]\")\n listes[2].suffixe(listes[1])\n listes[3].suffixe(listes[0])\n data_tycat(listes)\n _ = input()\n print(\"on efface 'DEVIS'\")\n del listes[3]\n data_tycat(listes)\n _ = input()\n # # test dans le cas où le doublage ne se fait pas à la tête de la liste\n # print(\"on efface 'DEPASSE'\")\n # del listes[2]\n # data_tycat(listes)\n # _ = input()\n print(\"on ajoute 'NT' apres 'PASSE'\")\n listes[1].suffixe(Liste(\"NT\"))\n data_tycat(listes)\n _ = input()\n print(\"on ajoute 'SE' apres elle-meme\")\n listes[0].suffixe(listes[0])\n data_tycat(listes)\n # # supression de SE\n # _ = input()\n # print(\"on efface 'SE'\")\n # del listes[0]\n # data_tycat(listes)", "def atender(self):\n\n if self.enfila>0: #Para que atiendan solamente si e que hay alguien en la fila\n\n self.enfila-=1\n self.fila.pop(0) #Saco primer elemento de la fila (Atienden al primer cliente)", "def T(v,securite):\n to_return = {} #renvoie le dictionnaire {indice du contact (0 -> direct / sinon -> plus ou moins direct) : set({disque})} \n Cv = set(C(v,securite))\n Tv = set(Cv)\n i=0\n xv,yv=l[v][0],l[v][1]\n while Cv != set() and i<5:\n to_return[str(i)]=Cv\n new_Cv = set()\n for j in Cv:\n xj,yj=l[j][0],l[j][1]\n #si j est devant v, on ne le copte pas\n if sqrt((xj-xt)**2+(yj-yt)**2)<sqrt((xv-xt)**2+(yv-yt)**2):\n continue\n new_Cv= new_Cv.__or__(C(j,securite).__sub__(Tv.__or__(set(j).__or__({v}))))\n Tv = Tv.__or__(new_Cv)\n Cv = new_Cv\n i+=1\n return to_return", "def mostrar_mejores_disparos(self):\n participantes = self.__disparos.copy()\n mejores_disparos = self.__calcular_mejores_disparos(participantes)\n for mejor_disparo in mejores_disparos:\n print(\n f\"\"\"\n =================================\n ====== PARTICIPANTE Nº: {mejor_disparo['nroParticipante']} ======\n =================================\n Disparos: {mejor_disparo['disparos']},\n Nombre: {mejor_disparo['nombre']},\n Apellido: {mejor_disparo['apellido']},\n 
Mejor disparo: {mejor_disparo['mejor_disparo']}\n =================================\n =================================\n \"\"\"\n )", "def comando_listagem(self):\r\n if not args.data and not args.nnf and not args.chave:\r\n parser.error(\"informe a opcao -d, -n ou -c para listagem de notas.\")\r\n\r\n\tif args.data:\r\n # Usuario nao autorizado\r\n self.busca_por_data(args.data[0], args.data[1])\r\n return\r\n\r\n if not args.serie:\r\n parser.error(\"informe a opcao -s para serie\")\r\n\r\n\tif args.nnf:\r\n if args.bloco: # processa as notas em bloco por causa de erros de\r\n\t\t # Segmentation fault\r\n listagem = self.busca_por_nnf_bloco(args.nnf[0], args.nnf[1], \r\n\t\t\t args.serie, args.bloco)\r\n # Criar formatacao para as outras buscas\r\n\t # Em caso de Exception: A listagem nao esta sendo atualizada.\r\n if args.irregular: # Inclui notas nao utilizadas\r\n # python /u1/caixa/nfce.py -p listagem -n 256 270 -s 80 -ib 1\r\n sequencia = [] # Lista contendo numeros de notas utilizadas\r\n for nota in listagem:\r\n sequencia.append(int(nota[\"docnumero\"]))\r\n serie = listagem[0][\"docserie\"] \r\n notas_nao_utilizadas = self.sequencia_nao_utilizada(serie, sequencia)\r\n for nota in notas_nao_utilizadas:\r\n listagem.append(nota) # Append das notas nao utilizadas\r\n \r\n #diretorio = None\r\n self.formata_listagem(listagem)\r\n else:\r\n self.busca_por_nnf(args.nnf[0], args.nnf[1], args.serie)\r\n return\r\n\r\n\tif args.chave:\r\n self.busca_por_chave(args.chave, args.serie)", "def on_touch_up(self, touch):\n if not self.destroyed:\n from ZoneUtilisateur import ZoneUtilisateur\n if self.collide_point(touch.x, touch.y) and self.parent is not None and self.support != \"tablette\" and not self.validated:\n for child in self.parent.children:\n if child.__class__ == ZoneUtilisateur and child.collide_point(self.center[0], self.center[1]) and child.is_connected():\n data = '{\"Criterion\" : \"' + self.texte + '\", \"IdUser\" : \"' + str(\n self.createur.identifier) + '\", \"TextType\" : \"' + self.text_type + '\", \"Links\" : ['\n for link in self.links:\n data += '{ \"IdImage\" :\"' + str(link.id_img) + '\",'\n data += '\"SrcImage\" : \"' + self.parent.get_animal(link.id_img).src_image + '\",'\n data += '\"IdUser\" :\"' + str(link.id_usr) + '\",'\n data += '\"Distance\" :\"' + str(link.distance) + '\",'\n data += '\"Angle\" :\"' + str(link.angle) + '\"},'\n if len(self.links) > 0:\n data = data[:-1]\n\n data += '], \"Fusionneurs\" : ['\n for participants in self.fusionneurs:\n data += '{\"IdUser\" : \"' + str(participants.identifier) + '\"},'\n if len(self.fusionneurs) > 0 :\n data = data[:-1]\n data += ']}\\n'\n self.parent.server.send_msg(data, child.user.socket)\n self.canvas.clear()\n self.destroyed = True\n self.parent.criterions.remove(self)\n self.parent.remove_widget(self)\n\n Scatter.on_touch_up(self, touch)", "def get_contacts_list(self):\n contacts = self.driver.find_elements_by_class_name(\"_1wjpf\")\n s= [contact.text for contact in contacts] #extracts chats and last messsages\n print (\"get contacts: \"+str(s)) #print only chat names\n return s[::2] #returns only chat names", "def get_discus_for_comment(id_article, id_comment):\n discus_id_list = list() # result id list - for easier calculations\n discus_obj_list = list() # list with Comment obj\n\n all_comments_by_article_obj = Comment.objects.filter(article=id_article).order_by('create')\n adjacent_list = list(zip(\n list(\n all_comments_by_article_obj.values_list('reply_to_comment', 
flat=1).filter(reply_to_comment__isnull=False)),\n list(all_comments_by_article_obj.values_list('id', flat=1).filter(reply_to_comment__isnull=False))\n ))\n\n def dfs(comment_id):\n for i in adjacent_list:\n if (comment_id in i) and (i[1] not in discus_id_list):\n discus_id_list.append(i[1])\n\n discus = Comment.objects.get(id=i[1])\n discus_obj_list.append(discus)\n\n dfs(i[1])\n\n dfs(id_comment)\n\n if len(discus_id_list) == 0:\n return None\n else:\n # return discus_id_list\n return discus_obj_list", "def get_only_wanted(self, datas_percorrer, index_data_1):\n \n # Dicionario de datas guardadas, chamando a function\n # OrderedDict() que lembra a ordem de cada item\n datas_guardadas = OrderedDict()\n \n # Sub_dicionario para datas/ano, chamando a function\n # OrderedDict() que lembra a ordem de cada item\n datas_guardadas_ano = OrderedDict()\n\n # Sub_dicionario para datas/mes, chamando a function\n # OrderedDict() que lembra a ordem de cada item\n datas_guardadas_mes = OrderedDict()\n \n # Lista com o nome dos meses\n meses_ano = [\n 'Janeiro',\n 'Fevereiro',\n 'Março',\n 'Abril',\n 'Maio',\n 'Junho',\n 'Julho',\n 'Agosto',\n 'Setembro',\n 'Outubro',\n 'Novembro',\n 'Dezembro'\n ]\n\n # Lista com o nome dos dias da semana\n dias_semana = [\n 'Domingo',\n 'Segunda-Feira',\n 'Terça-Feira',\n 'Quarta-Feira',\n 'Quinta-Feira',\n 'Sexta-Feira',\n 'Sábado',\n ]\n \n # lista de filtro dos dias desejados\n dias_desejados = [ 0, 2, 6]\n \n # lista contendo o numero do primeiro dia do mes\n # em relação ao numero total de dias no ano\n primeiro_dia_mes = [\n #Primeiro dia de cada mês\n 0, 31, 59, 90,\n 120, 151, 181, 212,\n 243, 273, 304, 334\n ]\n \n # lista de meses com 30 dias \n meses_trinta = [ 4, 6, 9, 11 ]\n \n # Esta variavel trará 31 dias para os não\n # estiverem a lista 'meses_trinta'\n numero_dias_mes = 31\n \n # Numero do dia atual\n numero_dia_ano = primeiro_dia_mes[self.mes -1] + self.dia\n \n # Cria variaveis para trabalhar com dia, mes, ano\n # e index para a lista 'dias_semana'\n dia_atual = self.dia\n mes_atual = self.mes\n ano_atual = self.ano\n sendo_dia = index_data_1\n # Variável para ano bissexto\n se_bissexto = False\n # Verifica se ano é bissexto\n if (ano_atual %4 == 0 and ano_atual %100 != 0):\n se_bissexto = True\n elif ano_atual %400 == 0:\n se_bissexto = True\n else:\n se_bissexto = False\n\n # Nome mes atual\n nome_mes_atual = ''\n \n # Inicia loop para filtrar dias\n for dia_passado in range(0, datas_percorrer + 1):\n\n #Da nome ao mes\n nome_mes_atual = meses_ano[mes_atual - 1]\n \n # Verifica se mes atual esta na lista meses_trinta\n # se true, o mes tem 30 dias\n if mes_atual in meses_trinta:\n numero_dias_mes = 30\n # Se o mes atual é = 2 (fevereiro), o mes possui 28 dias\n elif mes_atual == 2:\n numero_dias_mes = 28\n # Porem se for bissexto, o mes tem 29 dias.\n if se_bissexto == True:\n numero_dias_mes = 29\n else:\n numero_dias_mes = 31\n \n # Verifica se a data passa no filtro 'dias desejados'\n if sendo_dia in dias_desejados:\n # Concatena chave\n chave_dia_mes = str(dia_atual)\n #chave_dia_mes += '/' + str(mes_atual)\n # Concatena valor\n valor_semana = dias_semana[sendo_dia]\n # Guarda as datas no dicionario mes\n datas_guardadas_mes[chave_dia_mes] = valor_semana\n\n # Adiciona uma unidade no numero_do_dia\n # na data atual e no index do dia\n numero_dia_ano += 1\n dia_atual += 1\n sendo_dia += 1\n # Cria ou adiciona o dicionario mes no dicionario de ano\n datas_guardadas_ano[nome_mes_atual] = datas_guardadas_mes\n # Cria ou adiciona o dicionario ano 
no dicionario geral\n datas_guardadas[ano_atual] = datas_guardadas_ano\n \n # Se o index após a adição for > 6, retorna 0\n if sendo_dia > 6:\n sendo_dia = 0\n \n # Se o dia atual for maior que o numero total\n # de dias do mes, retorna dia primeiro do mes seguinte\n if dia_atual > numero_dias_mes:\n dia_atual = 1\n mes_atual += 1\n datas_guardadas_mes = OrderedDict()\n # Se o mes > 12, retorna janeiro, primeiro do ano seguinte\n if mes_atual > 12:\n mes_atual = 1\n numero_dia_ano = 1\n ano_atual += 1\n datas_guardadas_ano = OrderedDict()\n # Verifica se ano seguinte é bissexto\n if (ano_atual %4 == 0 and ano_atual %100 != 0):\n se_bissexto = True\n elif ano_atual %400 == 0:\n se_bissexto = True\n else:\n se_bissexto = False\n \n return(datas_guardadas)", "def question(dico):\n l = []\n for i in range(len(dico)):\n l.append(dico[i][0])\n affichage_question(dico,l)", "def envoi_par_mail(self):\n cr , uid, context = self.env.args\n if not self.pool['res.users'].has_group(cr, uid, 'is_plastigray.is_comptable_group'):\n raise Warning(u\"Accès non autorisé !\")\n ids=[]\n for obj in self:\n ids.append(str(obj.id))\n if len(ids)>0:\n SQL=\"\"\"\n select ai.is_mode_envoi_facture, ai.partner_id, ai.name, ai.id\n from account_invoice ai\n where \n ai.id in(\"\"\"+','.join(ids)+\"\"\") and \n ai.is_date_envoi_mail is null and \n ai.is_mode_envoi_facture like 'mail%'\n order by ai.is_mode_envoi_facture, ai.partner_id, ai.name\n \"\"\"\n cr.execute(SQL)\n result = cr.fetchall()\n\n # ** Un mail par client*********************************************\n partners={}\n for row in result:\n if row[0]=='mail_client':\n partner_id = row[1]\n id = row[3]\n if not partner_id in partners:\n partners[partner_id]=[]\n partners[partner_id].append(id)\n #*******************************************************************\n\n\n # ** Un mail+BL par client******************************************\n for row in result:\n if row[0]=='mail_client_bl':\n partner_id = row[1]\n id = row[3]\n if not partner_id in partners:\n partners[partner_id]=[]\n partners[partner_id].append(id)\n #*******************************************************************\n\n\n #** Envoi des mails par partner ************************************\n for partner_id in partners:\n ids=partners[partner_id]\n self._envoi_par_mail(partner_id, ids)\n #*******************************************************************\n\n\n # ** Un mail par facture *******************************************\n for row in result:\n if row[0] in ['mail', 'mail_regroupe_bl']:\n partner_id = row[1]\n id = row[3]\n self._envoi_par_mail(partner_id, [id])\n #*******************************************************************\n\n\n # ** Un mail par facture en double exemplaire **********************\n for row in result:\n if row[0]=='mail2':\n partner_id = row[1]\n id = row[3]\n self._envoi_par_mail(partner_id, [id])\n #*******************************************************************", "def nao_quer_beber(self, cliente):\n self.vazio.acquire()\n with self.lock:\n self.buff_n_quer.append(cliente)\n # libera lara um garcom retira-lo\n self.cheio.release()", "def findSommetsConnexeTo(self, origine, notVisited):\r\n notVisited.remove(origine) # on retire le sommet des non visités\r\n # print(self.adjMatrix)\r\n for voisin, weight in enumerate(self.adjMatrix[origine]): # Pour chaque voisin de ce point\r\n if weight !=0 and voisin in notVisited: # On y est connecté et on ne l'a pas encore vu\r\n self.findSommetsConnexeTo(voisin, notVisited) # On répète le processus pour ce 
point\r", "def contacts_list_update(self):\n\t\tself.database.contacts_clear()\n\t\tclient_log.debug(f'Запрос контакт листа для пользователся {self.name}')\n\t\treq = {\n\t\t\tACTION: GET_CONTACTS,\n\t\t\tTIME: time.time(),\n\t\t\tUSER: self.username\n\t\t}\n\t\tclient_log.debug(f'Сформирован запрос {req}')\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, req)\n\t\t\tans = get_message(self.transport)\n\t\tclient_log.debug(f'Получен ответ {ans}')\n\t\tif RESPONSE in ans and ans[RESPONSE] == 202:\n\t\t\tfor contact in ans[LIST_INFO]:\n\t\t\t\tself.database.add_contact(contact)\n\t\telse:\n\t\t\tclient_log.error('Не удалось обновить список контактов.')", "def _do_alle_meine_sprueche(self, chat_id, user_id, args, update):\n spruchlist = sorted(self.db.get_sprueche(user_id), key=lambda s: (s.active, s.time), reverse=True)\n user_name = update[\"message\"][\"from\"][\"first_name\"]\n \n if spruchlist == []:\n self.tclient.send_message('Ich habe noch keinen Nasenspruch von dir gespeichert, {}.'.format(user_name), user_id)\n else:\n messageList = []\n for spruch in spruchlist:\n messageList.append('<i>{}</i>{}'.format(spruch.text, ' (aktiv)' if spruch.active == 1 else ''))\n \n message = '\\n'.join(messageList) \n self.tclient.send_message(message, user_id)", "def Lluiteu(self) -> IResultList:\n\n if len(self._Lluitadors) != 2:\n print(\"ERROR. Falten lluitadors\")\n exit\n\n elQuePica = randint(0, 1)\n\n while self._Lluitadors[0].es_Ko() == False and self._Lluitadors[1].es_Ko() == False:\n elQueRep = (elQuePica+1) % 2\n proteccio = self._Lluitadors[elQueRep].get_Lluitador().Protegeix()\n pica = self._Lluitadors[elQuePica].get_Lluitador().Pica()\n\n if pica in proteccio:\n self._Lluitadors[elQueRep].treu_vida()\n print(\n f'{self._Lluitadors[elQueRep].get_nom()} rep un cop al {pica.name} de {self._Lluitadors[elQuePica].get_nom()}')\n else:\n print(\n f'{self._Lluitadors[elQueRep].get_nom()} atura el cop al {pica.name} de {self._Lluitadors[elQuePica].get_nom()}')\n elQuePica = elQueRep\n\n guanyador = next(x for x in self._Lluitadors if x.es_Ko() == False)\n perdedor = next(i for i in self._Lluitadors if i.es_Ko() == True)\n\n comentariLocutor = \"\"\n\n if (guanyador.get_vida() - perdedor.get_vida()) > 5:\n comentariLocutor = \"Quina pallissa!!\"\n\n print(f\"{perdedor.get_nom()} cau a terra!\")\n print(f\"VICTÒRIA DE {guanyador.get_nom()}!!! 
{comentariLocutor}\")\n\n return self._Lluitadors", "def diminish(self):\n for n in self.notes:\n n.diminish()", "def delete(self, index):\n if not 1 <= index <= self.count: #de index moet waardig zijn\n return False\n if self.isEmpty() is True: #als de lijst leeg is, kan je niet verwijderen\n return False\n if self.getLength() == 1: #speciaal geval, nog maar 1 item, verwijder door self.head.next er niet meer naar te laten wijzen\n self.head.next = None\n if index == 1: #als index == 1, dan moet dus het laatste item weg, dus moet men helemaal de ketting doorlopen om prev te vinden\n index = self.count + 1 #had evengoed range(0, index (zonder + 1)) kunnen doen, het doel is om naar het voorlaatste item te gaan\n current = self.head.next\n prev = self.head\n for teller in range(1, index): #zoek 'voorlaatste' (eigenlijk laatste, maar visueel eerder 'voorlaatste' item, want deze wijst naar het laatste\n current = current.next\n prev = prev.next\n self.head.next = prev\n prev.next = current.next #current moet eigenlijk weg, dus zetten we de pointer van de vorige op het item waar current naar wijst\n #op deze manier wordt current eigenlijk 'ovegeslagen'\n else:\n current = self.head.next\n prev = self.head\n for teller in range(1, index):\n current = current.next\n prev = prev.next\n prev.next = current.next\n\n\n self.count -= 1\n return True", "def cellules(self): # itérateur rendu safe\n cellule_courante = self.tete\n while cellule_courante is not None:\n cellule_suivante = cellule_courante.suivant # sauvegarde\n yield cellule_courante\n cellule_courante = cellule_suivante # récupération de la sauvegarde", "def analyse_donnees(self, mere, foetus, pere, log):\n concordance_mf = 0\n concordance_pf = None\n if len(pere) != 0:\n concordance_pf = 0\n log = log + \"Père détecté.................................\\n\"\n log = log + \"\\n\\nVérification concordance des ADNs entre père et foetus..............................\\n\"\n for Alleles in range(len(foetus)):\n for Allele_Foe in range(3):\n if foetus[Alleles].allele[Allele_Foe] in pere[Alleles].allele:\n if foetus[Alleles].allele[Allele_Foe] != 0.0:\n pere[Alleles].concordance_pere_foetus = \"OUI\"\n concordance_pf = concordance_pf + 1\n log = log + \"Concordance pour marqueur \" + str(\n foetus[Alleles].marqueur) + \" OK..................\\n\"\n break\n else:\n pere[Alleles].concordance_pere_foetus = \"NON\"\n log = log + \"Concordance pour marqueur \" + foetus[\n Alleles].marqueur + \" PAS OK..............\\n\"\n break\n log = log + \"\\n\\nVérification concordance des ADNs entre mère et foetus..............................\\n\"\n for Alleles in range(len(foetus)):\n for Allele_Foe in range(3):\n if foetus[Alleles].allele[Allele_Foe] in mere[Alleles].allele:\n if foetus[Alleles].allele[Allele_Foe] != 0.0:\n foetus[Alleles].concordance_mere_foetus = \"OUI\"\n concordance_mf = concordance_mf + 1\n log = log + \"Concordance pour marqueur \" + str(\n foetus[Alleles].marqueur) + \" OK..................\\n\"\n break\n else:\n foetus[Alleles].concordance_mere_foetus = \"NON\"\n log = log + \"Concordance pour marqueur \" + foetus[Alleles].marqueur + \" PAS OK..............\\n\"\n break\n log = log + \"Vérification concordance des ADns terminée..................................\\n\\n\\n\"\n if concordance_mf != len(foetus):\n resultats, conclusion = self.resultat(concordance_mf, concordance_pf, foetus, mere, pere)\n log = log + \"Concordance des ADNs PAS OK....................\\n\"\n log = log + \"Erreur dans l'échantillon...................\\n\"\n log = 
log + \"Revérifier s'il vous plaît.............\\n\"\n return resultats, conclusion, log\n else:\n log = log + \"Traitement des 15 autres marqueurs..............................\\n\"\n for nbre_lignes in range(1, len(mere)):\n log = log + \"Traitement du marqueur \" + str(foetus[nbre_lignes].marqueur) + \"..........\\n\"\n pic = foetus[nbre_lignes].foetus_pics()\n log = log + \"Calcul du nombre d'allèles pour le foetus......................\\n\"\n log = log + \"Nombre d'allèles pour le foetus : \" + str(pic) + \".........\\n\"\n log = log + \"Vérification de l'homozygotie de la mère......................\\n\"\n mere[nbre_lignes].homozygotie()\n log = log + \"Mère homozygote : \" + str(mere[nbre_lignes].homozygote) + \"...............\\n\"\n log = log + \"Vérification mère et foetus mêmes allèles......................\\n\"\n foetus[nbre_lignes].allele_semblable(mere[nbre_lignes])\n log = log + \"Code de retour vérification allèles semblables: \" + str(\n foetus[nbre_lignes].informatif) + \"...............\\n\"\n log = log + \"Initialisation du taux de contamination pour calcul à venir...............\\n\"\n foetus[nbre_lignes].taux = 0.0\n log = log + \"Taux initialisé.................................\\n\"\n log = log + \"Si code informatif de retour allèles semblables différent de 2, vérification écho.............\\n\"\n log = log + \"Si écho, affection code informatif 3...............\\n\"\n if foetus[nbre_lignes].informatif != 2:\n log = log + \"Vérification si écho......................\\n\"\n mere[nbre_lignes].echo(foetus[nbre_lignes])\n log = log + \"Code retour vérification écho : \" + str(\n foetus[nbre_lignes].informatif) + \"...............\\n\"\n log = log + \"Début chaîne de traitement...........................\\n\"\n if pic == 3:\n log = log + \"Trois allèles détectés......................\\n\"\n foetus[nbre_lignes].contamination_heterozygote(mere[nbre_lignes])\n log = log + \"Marqueur informatif, affectation du code contamination 1..............\\n\"\n foetus[nbre_lignes].informatif = 1\n log = log + \"Calcul taux de contamination du marqueur..........\\n\"\n foetus[nbre_lignes].contamination = 2\n log = log + \"Calcul terminé....................\\n\"\n elif mere[nbre_lignes].homozygote:\n log = log + \"Mère homozygote.......................\\n\"\n log = log + \"Marqueur non informatif, affectation du code informatif 0............\\n\"\n foetus[nbre_lignes].informatif = 0\n elif pic == 2:\n log = log + \"Deux allèles détectés..............\\n\"\n if foetus[nbre_lignes].informatif == 2:\n log = log + \"Si mêmes allèles, vérification homozygote contaminé...............\\n\"\n foetus[nbre_lignes].verif_homozygote_contamine(self)\n if foetus[nbre_lignes].contamination == 1:\n log = log + \"Homozygote contaminé identifié.....................\\n\"\n log = log + \"Calcul du taux de contamination....................\\n\"\n foetus[nbre_lignes].homozygote_contamine(self)\n log = log + \"Calcul du taux de contamination effectué...........\\n\"\n else:\n if foetus[nbre_lignes].informatif != 3:\n log = log + \"Code calcul écho différent de 3..................\\n\"\n log = log + \"Marqueur informatif, affectation du code informatif 1.............\\n\"\n foetus[nbre_lignes].informatif = 1\n log = log + \"Marqueur non contaminé, affectation du code contamination 0................\\n\"\n foetus[nbre_lignes].contamination = 0\n else:\n log = log + \"Un seul allèle détecté............\\n\"\n if foetus[nbre_lignes].informatif != 3:\n log = log + \"Code informatif différent de 3...........\\n\"\n 
log = log + \"Marqueur informatif, affectation du code informatif 1.............\\n\"\n foetus[nbre_lignes].informatif = 1\n log = log + \"Marqueur non contaminé, affectation du code contamination 0................\\n\"\n foetus[nbre_lignes].contamination = 0\n log = log + \"\\n\\n\"\n log = log + \"Calcul échantillon contaminé ou non......\\n\"\n log = log + \"Marqueur contaminé si >\" + str(self.seuil_taux_conta) + \".......\\n\"\n log = log + \"Echantillon contaminé si plus de \" + str(\n self.seuil_nbre_marqueurs) + \"marqueurs contaminés...\\n\"\n self.conclusion_echantillon(foetus)\n log = log + \"Calcul échantillon terminé.....\\n\"\n log = log + \"Fin de traitement...........\\n\"\n resultats, conclusion = self.resultat(concordance_mf, concordance_pf, foetus, mere, pere)\n return resultats, conclusion, log", "def affichage_creation_tournoi():\n nom = \"\"\n lieu = \"\"\n date = \"\"\n nb_tours = 4\n joueurs = []\n temps = \"\"\n note = \"\"\n\n print(\"\\n---------------------------\")\n while len(nom) == 0:\n try:\n nom = str(input(\"\\nNom : \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi un nom valide.\")\n sl(2)\n continue\n\n print(\"\\n---------------------------\")\n while len(lieu) == 0:\n try:\n lieu = str(input(\"\\nLieu : \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi un lieu valide.\")\n sl(2)\n continue\n\n print(\"\\n---------------------------\")\n while len(date) == 0:\n try:\n date = str(input(\"\\nDate\\nFormat : jj/mm/aaaa : \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi une date valide.\")\n sl(2)\n continue\n test_date = OutilsControleurs.test_date(date)\n if test_date == 0:\n print(\"\\nVous avez saisi une valeur trop grande.\")\n date = \"\"\n if test_date == 1:\n print(\"\\nVous avez saisi une valeur trop petite.\")\n date = \"\"\n if test_date == 2:\n break\n if test_date == 3:\n print(\"\\nVous avez saisi un format de date incorrect.\")\n date = \"\"\n\n print(\"\\n---------------------------\")\n nb_tours_modif = \"\"\n while nb_tours_modif != 2 or nb_tours_modif != 1:\n try:\n print(\"\\nNombre de tours\\nPar default le nombre est de 4\\nVoulez-vous modifier cette valeur ?\")\n nb_tours_modif = int(input(\"\\n1 - Oui\\n2 - Non\\n\\nVotre choix: \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi un nombre valide.\")\n sl(2)\n continue\n if nb_tours_modif == 1:\n while nb_tours == 4:\n try:\n nb_tours = int(input(\"\\nNombre de tours : \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi un nombre valide.\")\n sl(2)\n continue\n if nb_tours == 4:\n break\n break\n if nb_tours_modif == 2:\n break\n\n print(\"\\n---------------------------\\n\\nListe des joueurs :\\n\")\n liste_joueurs_tournois = Joueur.joueurs_tournoi()\n if liste_joueurs_tournois == 0:\n print(\"Il n'y a pas ou pas suffisament de joueurs pour organiser un tounois.\")\n print(\"Veuillez ajouter des joueurs via le menu.\")\n input(\"\\nAppuyer sur entrer pour continuer\")\n return\n\n for arg in liste_joueurs_tournois:\n print(arg)\n x = 8\n while x != 0:\n try:\n joueur = int(input(\"Saisir encore {} indice de joueurs : \".format(x)))\n except ValueError:\n print(\"\\nVous n'avez pas saisi un indice valide.\")\n sl(2)\n continue\n if joueur > 0 and joueur <= len(liste_joueurs_tournois):\n if joueur not in joueurs:\n joueurs.append(joueur)\n else:\n print(\"Vous avez deja saisi ce joueur.\")\n x += 1\n else:\n x += 1\n x -= 1\n\n y = 1\n nom_joueurs = []\n for arg in liste_joueurs_tournois:\n arg = arg[:-15]\n 
nom_joueurs.append(str(arg).replace(\"Indice joueur : {}\\n \".format(y), \"\").replace(\"\\n \", \"\"))\n y += 1\n joueurs = Joueur.get_joueurs_tournoi(joueurs, nom_joueurs)\n\n print(\"\\n---------------------------\")\n temps_choix = 0\n while temps_choix != 1 or temps_choix != 2 or temps_choix != 3:\n try:\n temps_choix = int(input(\"\\nContrôle de temps\\n1 - Bullet\\\n \\n2 - Blitz\\n3 - Coup rapide\\n\\nVotre choix : \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi une valeur valide.\")\n sl(2)\n continue\n if temps_choix == 1:\n temps = \"Bullet\"\n break\n if temps_choix == 2:\n temps = \"Blitz\"\n break\n if temps_choix == 3:\n temps = \"Coup rapide\"\n break\n\n print(\"\\n---------------------------\")\n while len(note) == 0:\n try:\n note = str(input(\"\\nDescription : \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi une valeur valide.\")\n sl(2)\n continue\n if len(note) == 0:\n break\n return nom, lieu, date, nb_tours, joueurs, temps, note", "def influence(k,L,n):\n try:\n to_check = L[n-1] #set des indices\n contact_direct=C(k,0)\n return list(to_check.intersection(contact_direct))\n except:\n return []", "def parcour_largeur(self, liste=[]):\n if self not in liste:\n liste.append(self)\n for successeur in self.tache.successeur:\n liste = successeur.sommet.parcour_largeur(liste)\n return liste", "def descontarCantidad(self,detalle,producto,cantidad):\n query=LoteModel.obtenerLoteProducto(producto,self.sesion)\n valores=[]\n for a in query:\n loteProducto=LoteProductoModel.buscarLoteProducto(self.sesion,producto,a.codigo).first()\n if cantidad<=loteProducto.cantidad:\n loteProducto.descontarCantidad(cantidad)\n loteProducto.modificar(self.sesion)\n valores.append([loteProducto,cantidad])\n break\n else:\n cantidad-=loteProducto.cantidad\n valores.append([loteProducto,loteProducto.cantidad])\n loteProducto.descontarCantidad(loteProducto.cantidad)\n loteProducto.modificar(self.sesion)\n self.lotesVentas[detalle]=valores\n detalle.agregarLotes(self.sesion,self.lotesVentas[detalle])", "def __call__(self, serv, author, args):\n if not self.bot.has_admin_rights(serv, author):\n return\n if len(args) > 1:\n liste = args[1].split(\"@\")[0]\n query = (\"SELECT id, subject, author, liste FROM moderation \" +\n \"WHERE liste=%s AND moderated=0 ORDER BY date DESC\")\n values = (liste,)\n message = (\"Messages en attente de modération \" +\n \"pour la liste \" + liste + \" :\")\n else:\n query = (\"SELECT id, subject, author, liste FROM moderation \" +\n \"WHERE moderated=0 ORDER BY date DESC\")\n values = ()\n message = \"Messages en attente de modération :\"\n try:\n bdd = self.bot.pgsql_connect(serv)\n assert(bdd is not None)\n except AssertionError:\n return\n\n bdd_cursor = bdd.cursor()\n bdd_cursor.execute(query, values)\n if bdd_cursor.rowcount <= 0:\n self.bot.ans(serv,\n author,\n \"Aucun message en attente de modération.\")\n return\n self.bot.ans(serv, author, message)\n for (ident, subject, author, liste) in bdd_cursor:\n self.bot.say(serv, \"[\" + liste + \"] : « \" + subject + \" » par \" +\n author)\n bdd_cursor.close()", "def liste_decroissante(self):\n\t\tif self.__tete:\n\t\t\treturn self.__tete.liste_decroissante()\n\t\telse:\n\t\t\treturn []", "def __init__(self):\n\n self.notas = []", "def descendre_membre(self, nom_membre):\n membre = self.get_membre(nom_membre)\n indice = self.__membres.index(membre)\n if indice != len(self.__membres) - 1: # si le membre n'est pas en bas\n membre = self.__membres.pop(indice)\n self.__membres.insert(indice + 
1, membre)", "def GetContactList(Pos1, Ind1 = None, Pos2 = None, Ind2 = None, \n Radius = 8., MinCO = 3):\n l = []\n N1 = len(Pos1)\n if Ind1 is None: Ind1 = range(0,N1)\n if not Pos2 is None:\n N2 = len(Pos2)\n if Ind2 is None: Ind2 = range(0,N2)\n for i in range(0, N1):\n Vecs = Pos2 - Pos1[i,:]\n Vecs = (Vecs*Vecs).sum(axis=1)\n ladd = [(Ind1[i], Ind2[x])\n for x in flatnonzero(Vecs < Radius*Radius)]\n l.extend(ladd)\n else:\n for i in range(0, N1-1):\n Vecs = Pos1[i+1:] - Pos1[i,:]\n Vecs = (Vecs*Vecs).sum(axis=1)\n ladd = [(Ind1[i], Ind1[i+1+x])\n for x in flatnonzero(Vecs < Radius*Radius)]\n l.extend(ladd)\n #prune for CO\n l = [(a,b) for (a,b) in l if b - a >= MinCO]\n return l", "def __init__(self):\n self.que = []\n self.tem_que = []", "def get_mvts(self, plateau):\n if self.type == \"p\": #Pion\n if self.color == \"w\":\n diags = [[self.x-1, self.y+1],[self.x+1, self.y+1]] #Mouvements possibles de diagonales\n faces = [[self.x, self.y+1]] #Mouvements possibles de face\n if not self.moved: #Si le pion n'a pas encore bougé de la partie\n faces.append([self.x, self.y+2])\n else:\n diags = [[self.x-1, self.y-1], [self.x+1, self.y-1]]\n faces = [[self.x, self.y-1]] #Mouvements possibles de \n if not self.moved:\n faces.append([self.x, self.y-2])\n pos = [] #Position de déplacement validées\n for d in diags:\n if verif_case(d[0], d[1]): #Si la case est sur le plateau \n pion = plateau.get_pion(d[0],d[1])\n if pion != None and pion.color != self.color: #Si il y a un pion ennemi\n pos.append(d)\n for f in faces: \n if verif_case(f[0],f[1]):\n pion = plateau.get_pion(f[0], f[1])\n if pion == None: #Si il n'y a pas de pion\n pos.append(f)\n return pos\n elif self.type == \"t\": #Tour\n pos = []\n dir = [[1,0],[-1,0],[0,1],[0,-1]] #4 directions possibles\n for d in dir:\n x,y = self.x+d[0],self.y+d[1] #Projection de position\n while verif_case(x,y): #Tant que (x, y) est sur le plateau\n pion = plateau.get_pion(x, y)\n if pion != None: #Si il y a un pion\n if pion.color != self.color: #Si il n'est pas allié\n pos.append([x,y])\n break\n pos.append([x,y])\n x += d[0]\n y += d[1]\n return pos\n elif self.type == \"c\": #Cavalier\n l = [-2,-1,1,2]\n mvts = [[x,y] for x in l for y in l if abs(x)!=abs(y)]\n pos = []\n for m in mvts:\n x = self.x + m[0]\n y = self.y + m[1]\n if verif_case(x,y):\n pion = plateau.get_pion(x, y)\n if pion == None or pion.color != self.color:\n pos.append([x, y])\n return pos\n elif self.type == \"f\": #Fou\n dir = [[1,1],[-1,1],[-1,-1],[1,-1]]\n pos = []\n for d in dir:\n x,y = self.x+d[0],self.y+d[1]\n while verif_case(x,y):\n pion = plateau.get_pion(x, y)\n if pion != None:\n if pion.color != self.color:\n pos.append([x,y])\n break\n pos.append([x,y])\n x += d[0]\n y += d[1]\n return pos\n elif self.type == \"k\": #Roi\n mvts = [[1,0],[-1,1],[0,-1],[-1,-1],[-1,0],[-1,1],[0,1],[1,1]] #4 mouvements possibles\n pos = []\n for m in mvts:\n x = self.x + m[0]\n y = self.y + m[1]\n if verif_case(x, y):\n pion = plateau.get_pion(x, y)\n if pion == None or pion.color != self.color:\n pos.append([self.x + m[0], self.y + m[1]])\n return pos\n elif self.type == \"q\": #Dame\n pos = []\n dir = [[1,0],[1,-1],[0,-1],[-1,-1],[-1,0],[-1,1],[0,1],[1,1]]\n for d in dir:\n x,y = self.x+d[0],self.y+d[1]\n while verif_case(x,y):\n pion = plateau.get_pion(x, y)\n if pion != None:\n if pion.color != joueur:\n pos.append([x,y])\n break\n pos.append([x,y])\n x += d[0]\n y += d[1]\n return pos", "def dryrecs():\n click.echo(\"Recommendations, not emailed: \")\n dio_dir: DioDir = DioDir()\n 
sched: ScheduleABC = DefaultSchedule()\n today: datetime.date = datetime.datetime.now().date()\n res: Optional[List[Person]] = get_recs(dio_dir, sched, today)\n next_day: datetime.date = sched.next_emailing_day(today)\n click.echo(recs_to_message(res, next_day))", "def suppression_check(describers):\n suppressed_describers = []\n unsuppressed_describers = []\n \n for describer_in_question in describers: \n is_unsuppressed = True\n for some_describer in describers: \n if some_describer.suppresses(describer_in_question):\n is_unsuppressed = False\n suppressed_describers.append(describer_in_question)\n # print some_describer.hash, \" suppresses \", describer_in_question.hash\n break\n # if we got here, describer is not suppressed\n if is_unsuppressed:\n unsuppressed_describers.append(describer_in_question)\n return (unsuppressed_describers, suppressed_describers)", "def suffixe(self, autre):\n if autre.tete is None: # liste vide\n return\n\n partage = False # boolean qui indique si la liste partage au moins une cellule ou pas\n for cel in self.cellules():\n if cel.utilisation > 1: # dès qu'on trouve une cellule partagée\n partage = True\n break\n\n if partage: # si la liste partage des cellules\n vu_premiere = True\n doublage = False\n premiere = True\n prec = self.tete\n for cel in self.cellules():\n if vu_premiere and cel.utilisation > 1: # dès qu'on trouve une cel partagée (1 fois)\n vu_premiere = False\n doublage = True\n cel.utilisation -= 1\n if doublage: # si on commence à doubler la partie partagée\n cel_double = Cellule(cel.valeur)\n if premiere: # cas initial (1 fois)\n premiere = False\n if cel == self.tete: # si c'est la première cellule qu'on supprime\n self.tete = cel_double\n else: # cas général\n prec.suivant = cel_double\n else: # cas itératif\n prec.suivant = cel_double\n prec = cel_double\n else:\n prec = cel\n prec.suivant = autre.tete\n autre.tete.utilisation += 1\n\n else: # sinon, si la liste ne partage aucune cellule\n if partage is False:\n for cel in self.cellules():\n if cel.suivant is None:\n cel.suivant = autre.tete\n autre.tete.utilisation += 1\n\n self.taille += autre.taille", "def lista_ventas(self,tipo,lista,filtro):\n self.lista=self.builder.get_object(lista)\n self.lista.clear()#Limpia la lista\n busqueda = \"\"\n\n if tipo==\"\":\n print(\"Llego a buscar ventas en BD\")\n #result=self.db.execute('SELECT * FROM Venta')\n busqueda = self.db.execute('SELECT ventaID ,fechaVenta, fechaInicio, fechaFin, C.nombre, P.nombre FROM Cliente C, Paquete P, Venta V WHERE V.IdCli = C.clienteID AND V.IdPaq = P.paqueteID')\n elif tipo == \"Cliente\":\n print(\"Busco venta por nombre del cliente\")\n busqueda = self.db.execute(\"SELECT ventaID ,fechaVenta, fechaInicio, fechaFin, C.nombre, P.nombre FROM Cliente C, Paquete P, Venta V WHERE V.IdCli = C.clienteID AND V.IdPaq = P.paqueteID AND C.nombre LIKE '%\"+filtro+\"%'\")\n elif tipo == \"Viaje\":\n print(\"Busco venta por nombre del paquete\")\n busqueda = self.db.execute(\"SELECT ventaID ,fechaVenta, fechaInicio, fechaFin, C.nombre, P.nombre FROM Cliente C, Paquete P, Venta V WHERE V.IdCli = C.clienteID AND V.IdPaq = P.paqueteID AND P.nombre LIKE '%\"+filtro+\"%'\")\n elif tipo == \"Fecha de inicio\":\n print(\"Busco venta por fecha de inicio\")\n busqueda = self.db.execute(\"SELECT ventaID ,fechaVenta, fechaInicio, fechaFin, C.nombre, P.nombre FROM Cliente C, Paquete P, Venta V WHERE V.IdCli = C.clienteID AND V.IdPaq = P.paqueteID AND fechaInicio LIKE '%\"+filtro+\"%'\")\n elif tipo == \"Fecha de fin\":\n print(\"Busco 
venta por fecha de fin\")\n busqueda = self.db.execute(\"SELECT ventaID ,fechaVenta, fechaInicio, fechaFin, C.nombre, P.nombre FROM Cliente C, Paquete P, Venta V WHERE V.IdCli = C.clienteID AND V.IdPaq = P.paqueteID AND fechaFin LIKE '%\"+filtro+\"%'\")\n \n for row in busqueda: \n #Empieza por la [1] porque el ID es la [0]\n # self.lista.append([row[4],row[5],row[1],row[2],row[3]])\n self.lista.append([row[1],row[2],row[3],row[4],row[5],row[0]])\n print(\"Listo ventas en tabla\")", "def get_list_of_dislikers_message(self, mid):\n cursor = self.get_cursor()\n query = 'SELECT users.uid,users.first_name, users.last_name, users.username, ' \\\n 'vote.voted_on ' \\\n 'FROM users INNER JOIN vote ON users.uid = vote.uid AND vote.upvote = FALSE ' \\\n 'INNER JOIN messages ON messages.mid = vote.mid AND messages.mid = %s'\n cursor.execute(query, (mid,))\n return cursor.fetchall()", "def disco_get_items(self,node,iq):\n to=iq.get_to()\n if to and to!=self.jid:\n return iq.make_error_response(\"recipient-unavailable\")\n if not node and self.disco_items:\n return self.disco_items\n return None", "def sendAllViews (self, msg=''): \n robNo= 1 # Pour buildview, le numero commence à 1\n for rob in self.lstRob: #Boucle sur tous nos robots enregistrés\n self.sendView(robNo,msg)\n robNo+=1", "def petite_partie(joueur1: object,\n joueur2: object,\n tableau_invisible_joueur1: list, tableau_invisible_joueur2: list\n ):\n print(\"plateau du joueur 2 : \\n\")\n\n tour_de_jeu(joueur1, joueur2, tableau_invisible_joueur2)\n\n rafraichir_position(joueur2, joueur2.porte_avion, joueur2.torpilleur, joueur2.croiseur)\n verif_bateau(joueur1, joueur2.porte_avion, joueur2.torpilleur, joueur2.croiseur)\n\n print(\"plateau du joueur 1 : \\n\")\n tour_de_jeu(joueur2, joueur1, tableau_invisible_joueur1)\n\n rafraichir_position(joueur1, joueur1.porte_avion, joueur1.torpilleur, joueur1.croiseur)\n verif_bateau(joueur2, joueur1.porte_avion, joueur1.torpilleur, joueur1.croiseur)", "def __init__(self):\n self.que_one = []\n self.que_two = []", "def rellenar_atril(self):\n while self.get_atril_espaciosVacios() > 0 and self.bolsa.cantidad_Fichas() > 0:\n self.agregar_al_atril()", "def modifier_classement_joueur(self, championnat, rapport):\r\n rapport.affichage_classement_championnat(championnat)\r\n championnat = sorted(championnat, key=lambda x: x.classement) # tri joueurs du championnat par classement\r\n print(\"Veuillez indiquer le numéro du joueur à modifier:\")\r\n choix = int(input())\r\n if choix <= len(championnat): # test si choix numero joueur valide\r\n index = choix - 1 # car liste commence a 0\r\n joueur = championnat[index]\r\n nouveau_joueur = copy.deepcopy(joueur)\r\n print(\"Veuillez indiquer le nouveau classement de \" + joueur.nom)\r\n nouveau_classement = int(input())\r\n nouveau_joueur.classement = nouveau_classement\r\n championnat.remove(joueur) # enleve ancienne position du joueur dans classement\r\n championnat.append(nouveau_joueur) # ajoute joueur avec classement actualise\r\n return championnat\r\n else:\r\n print(\"Numero joueur invalide\")\r\n return", "def list(self, irc, msg, args, user, optlist):\n (sender, receiver, old, sent) = (None, None, False, False)\n for (option, arg) in optlist:\n if option == 'old':\n old = True\n if option == 'sent':\n sent = True\n if option == 'from':\n sender = arg\n if option == 'to':\n receiver = arg\n sent = True\n if old:\n return self._oldnotes(irc, msg, sender)\n if sent:\n return self._sentnotes(irc, msg, receiver)\n def p(note):\n return not note.read and 
note.to == user.id\n if sender:\n originalP = p\n def p(note):\n return originalP(note) and note.frm == sender.id\n notes = list(self.db.select(p))\n if not notes:\n irc.reply('You have no unread notes.')\n else:\n utils.sortBy(operator.attrgetter('id'), notes)\n ids = [self._formatNoteId(msg, note) for note in notes]\n ids = self._condense(ids)\n irc.reply(format('%L.', ids))", "def desaparecer(self,identificador_de_lista):\n self.mapa.delet_bomberman(identificador_de_lista)", "def on_pushButton_precedent_clicked(self):\n \n if self.lineEdit_temperature.text() !=\"\":\n num_pt =int(self.label_pt.text())\n if num_pt - 1 < 1:\n pass\n else:\n \n #effacement\n for ligne in range(11):\n for colonne in range(8):\n if colonne !=6:\n \n self.tableWidget_mesures.setItem(ligne, colonne, QtGui.QTableWidgetItem(None))\n self.lineEdit_temperature.clear()\n self.lineEdit_stab_max.clear()\n self.lineEdit_u_stab_max.clear() \n self.lineEdit_hom_max_2.clear()\n self.lineEdit_u_hom_max.clear()\n \n else:\n pass\n #reafctation des donnees \n self.reaffectation_table_widget_mesures(str(int(self.label_pt.text())-1)) \n \n #presentation textEdit n°pt de la mesure\n self.label_pt.setText(str(num_pt -1))", "def appears(self):", "def obter_lista_sub_pastas(self):\n ##TO DO: não retornar links para arquivos\n if os.path.exists(self.caminho):\n return [arq for arq in self.obter_lista_conteudo() \\\n if not os.path.isfile(arq)]\n else:\n return[]", "def dialoguer(self, dialog,placeDialog):\n perso = []\n \"\"\"if self.niveau.numero == 1 and placeDialog == 0 : #virer cette ligne quand tous les dialogues auront été faits\n musicDialogue.play(pygame.mixer.Sound(\"resources/niveau/{2}/{0}.wav\".format(placeDialog,dialog.counter,self.niveau.numero)))\"\"\"\n for liste in dialog.characters:\n img = pygame.image.load(liste[1]).convert_alpha()\n perso.append([liste[0], img, liste[2]])\n for p in perso:\n #print(p)\n #print(p[1].get_rect())\n rect = p[1].get_rect()\n rect.x, rect.y = p[2]\n p[2] = (rect.x, self.fenetre.hauteur - 100 - rect.height)\n if p[2][0] == 500 :\n p[2]= (constantes.largeur-rect.width,self.fenetre.hauteur-100-rect.height)\n self.fenetre.rafraichir(self.moleculeJoueur.hp)\n while dialog.notFinished:\n punchline = dialog.getPunchline()\n try : #virer cette ligne quand tous les dialogues auront été faits\n audio = pygame.mixer.Sound(\"resources/niveau/{2}/{3}/{0},{1}.wav\".format(placeDialog,dialog.counter,self.niveau.numero,constantes.langue))\n \"\"\"volume = audioDialogue.get_volume()\n multiplier = 1/volume\n audioDialogue.set_volume(multiplier*(1-punchline[1])+0.4,multiplier*(punchline[1])+0.4)\"\"\"\n audioDialogue.play(audio)\n except:\n pass\n posX, posY = perso[punchline[1]][2]\n\n #print(punchline[1][0])\n #pygame.draw.rect(self.fenetre.fen, pygame.Color(0, 0, 0, 0), pygame.Rect(0, 0, self.fenetre.largeur, self.fenetre.hauteur))\n #self.fenetre.fen.blit(sombre, (0,0))\n self.fenetre.assombrir()\n self.fenetre.fen.blit(perso[punchline[1]][1], (posX, posY))\n self.fenetre.dessinerCadre(0, self.fenetre.hauteur-100, 100, self.fenetre.largeur)\n self.font = self.fenetre.font\n surface = self.font.render(perso[punchline[1]][0], 0, pygame.Color(255, 0, 0, 0))\n self.fenetre.dessinerCadre(posX+50, posY-25, 30, surface.get_rect().width+10)\n self.fenetre.ecrireTexte(perso[punchline[1]][0], posX + 55, posY - 20)\n self.fenetre.ecrireTexte(punchline[0], 25, self.fenetre.hauteur-80)\n event = pygame.event.wait()\n #audio = pygame.mixer.Sound(\"resources/temporaire/\"+str(dialog.counter)+\".wav\")\n 
#audioDialogue(audio)\n reading = True\n while reading:\n event = pygame.event.wait()\n if event.type == KEYDOWN:\n if event.key == constantes.touches[0]:\n reading = False\n if event.key == K_LEFT:\n reading = False\n dialog.counter-=2\n if dialog.counter<0:\n dialog.counter = 0\n\n self.fenetre.rafraichir(self.moleculeJoueur.hp)\n self.fenetre.fen.blit(perso[punchline[1]][1], (posX, posY))\n audioDialogue.stop()\n musicDialogue.stop()", "def search_clues(self):\r\n print(\"\\n************Searching Clues************\\n\")\r\n for word_id in self.words.keys():\r\n if not self.words[word_id].see and not self.words[word_id].wth:\r\n clue = pop_backslash(self.words[word_id].clue)\r\n temp = word_domain(\"allintext:\" + clue +' -crossword',self.words[word_id].length)\r\n temp2 = temp + word_domain(clue +' -crossword',self.words[word_id].length)\r\n domain = temp2 + data_muse(clue, self.words[word_id].length)\r\n unique_list = []\r\n for x in domain: \r\n y = x.upper()\r\n # check if exists in unique_list or not \r\n if y not in unique_list: \r\n unique_list.append(y) \r\n \r\n self.words[word_id].assign_word_domain(unique_list)\r\n print(\"\\nSearch is done...\")", "def get_listu_postaja(self):\n popis = sorted(list(self.postaje))\n return popis", "def __ne__(self, *args):\n return _ida_frame.xreflist_t___ne__(self, *args)", "def genererListe_l_m(listeObstacles,taille,tailleRangee, rangeeId):\n curseurServeur = 0\n curseurObst = 0\n liste_l_m = []\n \n if listeObstacles == []:\n for i in range(tailleRangee - taille + 1):\n liste_l_m.append((rangeeId, i))\n return liste_l_m\n\n while curseurServeur < tailleRangee:\n while(curseurServeur in listeObstacles):\n curseurServeur += 1\n \n while( curseurServeur > listeObstacles[curseurObst]):\n curseurObst += 1\n if curseurObst >= len(listeObstacles):\n if curseurServeur >= tailleRangee:\n return liste_l_m\n elif taille <= tailleRangee - curseurServeur:\n for i in range(curseurServeur, tailleRangee - taille + 1):\n liste_l_m.append((rangeeId, i))\n return liste_l_m\n else :\n return liste_l_m\n \n if taille <= listeObstacles[curseurObst] - curseurServeur :\n for i in range(curseurServeur, listeObstacles[curseurObst] - taille + 1):\n liste_l_m.append((rangeeId, i))\n \n curseurServeur = listeObstacles[curseurObst] + 1 \n return liste_l_m", "def abrir(self):\n assert self.open == False\n self.ne = [n for n in self.ne]\n self.je = [e1 for e1 in self.je]\n self.ie = []\n self.open = True", "def nettoyage(liste_car: List[str]) -> List[str]:\n a_supprimer = string.punctuation + \"«»°\" + string.digits + string.whitespace\n i = 0\n while i < len(liste_car):\n if liste_car[i] in a_supprimer:\n del liste_car[i]\n continue\n i += 1\n return liste_car", "def trobar_dades(dades):\n for item in dades:\n if item:\n return item", "def dodaj_rezervacije(self, rezervacije):\n for r in rezervacije:\n for u in r.seznam_ucilnic:\n for d in r.dnevi_med(self.min_datum, self.max_datum):\n self.rezerviranost_ucilnic[u.pk, d].append(r)", "def getIntervencionesDiputados(self):\n prog_indices = re.compile('(sr.|sra.).*', re.IGNORECASE)\n prog_nombre = re.compile('(sr.|sra.).*,*(\\.-)', re.IGNORECASE)\n\n result = prog_indices.finditer(self.dialogo)\n\n indices = []\n for i in result:\n indices.append(i.span()[0])\n\n dips = []\n for indice in range(len(indices) - 1):\n inicio, final = prog_nombre.match(self.dialogo[indices[indice]:indices[indice + 1]]).span()\n\n discurso = self.dialogo[indices[indice]:indices[indice + 1]]\n\n nombre = discurso[inicio:final]\n 
dips.append(nombre)\n self.intervenciones.append([nombre, discurso])\n\n dips_unicos = list(set(dips))\n\n for dip in dips_unicos:\n temp_dip = []\n for entrada in self.intervenciones:\n if dip == entrada[0]:\n temp_dip.append(entrada[1])\n\n self.intervenciones_por_diputado[dip] = temp_dip", "def entre_primeros_cola_recurso(self, recurso):\r\n\r\n if self in recurso.cola[0:recurso.capacity]:\r\n return True\r\n else:\r\n return False", "def duplica(self, request, pk=None):\n preventivo_da_clonare = get_object_or_404(PreventivoFornitore, pk=pk)\n \n #print(\"preventivo vecchio: id={}, codice={}, data={}\".format(preventivo_da_clonare.id, preventivo_da_clonare.codice, preventivo_da_clonare.data))\n #for r in preventivo_da_clonare.righe.all():\n # print(\"riga vecchia: id={}, quantità={}, articolo={}, descrizione={}, cancellato={}\".format(r.id, r.quantita, r.articolo, r.articolo_descrizione, r.cancellato))\n\n preventivo_nuovo = get_object_or_404(PreventivoFornitore, pk=pk)\n # resettando l'id e salvando si crea un altro record che ha gli stessi campi...\n preventivo_nuovo.id = None\n preventivo_nuovo.save()\n preventivo_nuovo.data = date.today()\n preventivo_nuovo.codice = PreventivoFornitore.objects.next_codice()\n preventivo_nuovo.accettato = False\n preventivo_nuovo.save()\n \n #print(\"preventivo nuovo: id={}, codice={}\".format(preventivo_nuovo.id, preventivo_nuovo.codice))\n #print(\"preventivo nuovo: data={}\".format(preventivo_nuovo.data))\n for r in preventivo_da_clonare.righe.non_cancellati():\n rn = RigaPreventivoFornitore()\n rn.preventivo = preventivo_nuovo\n rn.articolo = r.articolo\n rn.articolo_descrizione = r.articolo_descrizione\n rn.articolo_prezzo = r.articolo_prezzo\n rn.sconto_percentuale = r.sconto_percentuale\n rn.articolo_unita_di_misura = r.articolo_unita_di_misura\n rn.accettata = False\n rn.quantita = r.quantita\n rn.totale = r.totale\n rn.note = r.note\n rn.save()\n preventivo_nuovo.aggiorna_totale()\n\n #for r in preventivo_nuovo.righe.all():\n # print(\"riga nuova: id={}, quantità={}, articolo={}, descrizione={}\".format(r.id, r.quantita, r.articolo, r.articolo_descrizione))\n\n serializer = PreventivoFornitoreSerializer(preventivo_nuovo)\n return Response(serializer.data, status=status.HTTP_201_CREATED)", "def seleccion(datos,multifasta,querys):\n\n #Hacemos una lista con los nombres de las querys que están en el archivo\n nombres_query=[]\n with open (querys,mode=\"r\") as f:\n for linea in f:\n if linea[0]==\">\":\n nombres_query.append(linea[1:len(linea)-1])\n f.close()\n\n #Obtenemos los nombres de las query y de los subject con los que ha hecho hit\n nombres2=datos[\"Nombre_subject\"]\n nombres1=datos[\"Nombre_query\"]\n nombres1=list(nombres1[1:])\n nombres2=list(nombres2[1:])\n \n seleccion={}#diccionario querys:hits blast\n #Parseamos las listas para obtener el nombre de la query como clave\n #y como valor una lista con los subjects con los que ha hecho hit\n for i in range(len(nombres1)): \n for x in range(len(nombres_query)):\n if nombres_query[x]==nombres1[i]:\n clave=nombres_query[x]\n valor=nombres2[i]\n if clave in seleccion:\n seleccion[clave].append(valor)\n else:\n seleccion[clave]=[valor]\n #Elimino valores duplicados en los valores\n for k, v in seleccion.items():\n nuevo=[]\n for item in v:\n if item not in nuevo:\n nuevo.append(item)\n seleccion[k] = nuevo\n\n #Contador para determinar si se encuentra en una linea con el nombre (>) o con la secuencia\n n=0\n #Contador para recorrer la lista con los nombres de las querys\n cuenta=0\n 
#Lista con los nombres de los archivos generados\n lista_nombres=[]\n for opciones in seleccion.items():\n abre_query=open(querys,\"r\")#Abrimos el archivo de las querys\n keys=seleccion.keys()#Generamos una lista con las keys del diccionario, que son las querys\n modifica=[]\n modifica1=[]\n modifica2=[]\n modifica3=[]\n\n nombre_archivo=opciones[0]\n with open (multifasta,mode=\"r\") as f:\n with open(nombre_archivo,\"w+\") as archivo: #El nombre de cada archivo será el nombre de su query\n #Forma una lista con todos los hits de blast\n modifica2=opciones[1]\n \n # Forma una lista con el nombre de cada una de las querys\n for x in abre_query: \n if x[0]==\">\":\n modifica1.append(x[1:len(x)-1])\n \n #En caso de que los hits que encuentra en blast no sean las query, las elimina\n eliminar=[item for item in modifica1 if item not in modifica2]\n for r in eliminar:\n modifica1.remove(r)\n \n #Nos quedamos solamente con los hits que encontró en blast, quitando las querys\n modifica3 = [item for item in modifica2 if item not in modifica1]\n modifica3.sort()\n \n #genera la lista con todos los hits, incluidas las query\n if len(modifica1)<=len(keys):\n modifica=modifica1+modifica3\n\n #Forma un archivo por cada query introducida, con los nombres y secuencias\n #que se obtuvieron en el blast\n for linea in f:\n if cuenta==(len(modifica)):\n break\n if linea[1:(len(linea)-1)]==modifica[cuenta]:\n archivo.write(linea)\n n+=1\n elif n==1 and linea[0]!=\">\":\n archivo.write(linea)\n cuenta+=1\n n=0\n else:\n n=0\n lista_nombres=lista_nombres+[nombre_archivo] \n archivo.close()\n n=0\n cuenta=0\n f.close()\n \n \n \n\n \n return lista_nombres", "def del_contact_all(self):\n\n send_key(KEY_MENU)\n delstr = contact.get_value('contact_delete')\n if search_text(delstr):\n click_textview_by_text(delstr)\n click_checkbox_by_id('select_all_check')\n click_button_by_id('btn_ok')\n click_button_by_index(1)\n else:\n goback()\n\n sleep(2) #take a rest to wait view ...", "def casesAdjacentes(self, ligne, colonne, couleur):\r\n couleurs = []\r\n for (a,b) in DIRECTIONS:\r\n if a==b or a==-b:\r\n pass\r\n else:\r\n if 0<= ligne+a < SIZE and 0<= colonne+b < len(COLONNES):\r\n couleurs.append(self.jeu[ligne+a][colonne+b])\r\n return couleurs", "def crammer(self):\n \n matrix = [] # creamos una lista para matrices\n determinant = [] # creamos una lista para las determinantes\n \n # creamos la matriz de la determinante\n d = np.zeros(shape=(self.grado + 1, self.grado + 1))\n \n # Creamos las matrices (determinante + n matrices)\n for k in range(0, self.grado + 1) :\n # Creamos una matriz vacia\n a = np.zeros(shape=(self.grado + 1, self.grado + 1))\n \n if k == 0 :\n # Llenamos la matriz\n for i in range(self.init + 1, self.init + self.grado + 2) :\n # Creamos las n matrices \n aux = []\n # llenamos cada columna\n for j in range(0, self.grado + 1):\n aux.append(mt.pow(sp.Float(self.dato2[i].get()), j))\n \n \n d[i - (self.init +1)] = aux\n \n \n if np.linalg.det(d) == 0: return False\n \n #print d\n #print np.linalg.det(d)\n matrix.append(d) # Almacenamos la matriz\n determinant.append(np.linalg.det(d)) # Almacenamos la determinante\n \n # Llenamos la matriz\n for i in range(self.init + 1, self.init + self.grado + 2) :\n # Creamos las n matrices \n aux = []\n # llenamos cada columna\n for j in range(0, self.grado + 1):\n if k == j :\n aux.append(sp.Float(self.dato1[i].get()))\n else :\n aux.append(mt.pow(sp.Float(self.dato2[i].get()), j))\n \n \n a[i - (self.init +1)] = aux\n \n \n #print a\n #print 
np.linalg.det(a)\n matrix.append(a) # Almacenamos la matriz\n determinant.append(np.linalg.det(a)) # Almacenamos la determinante\n \n\n # Guardamos la informacion\n self.matrix = matrix\n self.determinant = determinant\n \n return True", "def _nextNotes(self, prev_note, high_note, low_note, voice):\n distance = 1\n high_bound = prev_note.distance(high_note)\n low_bound = prev_note.distance(low_note)\n bound = max([high_bound, low_bound])\n # notes = [prev_note]\n notes = []\n if voice in [1, 2]:\n notes.append(prev_note)\n while distance <= bound:\n if distance <= high_bound:\n total_note_value = prev_note.getTotalValue() + 1 * distance\n note = Note(value2note[total_note_value % 12], total_note_value//12)\n notes.append(note)\n\n if distance <= low_bound:\n total_note_value = prev_note.getTotalValue() - 1 * distance\n note = Note(value2note[total_note_value % 12], total_note_value//12)\n print(prev_note, note)\n notes.append(note)\n \n distance += 1\n\n return notes", "def filtreDsup2(liste):\r\n l=[]\r\n for i in range(len(liste)):\r\n if liste[i][5]>=2: l.append(i)\r\n return l", "def Collection_select_cheap(C:list, n: float)->list:\r\n result = []\r\n for rest in C:\r\n if Restaurant_is_cheap(rest, n) == True:\r\n result.append(rest)\r\n return result", "def on_touch_down(self, touch):\r\n if super(SelectableLabel, self).on_touch_down(touch):\r\n print(\"touched Note \" + str(self.index + 1))\r\n\r\n global note_index\r\n note_index = self.index\r\n\r\n # self.selected = True if self.selected == False else False\r\n\r\n return True\r\n if self.collide_point(*touch.pos) and self.selectable:\r\n return self.parent.select_with_touch(self.index, touch)", "def craft(self, items):\n\n if items[0].looted and items[1].looted and items[2].looted:\n print(\"Seringue fabriquée ! 
Vous pouvez endormir le garde.\")\n self.stuff = [\"seringue\"]", "def mostrar_promedio_disparo(self):\n participantes = self.__disparos.copy()\n promedios = self.__calcular_promedio_disparo(participantes)\n for promedio in promedios:\n print(\n f\"\"\"\n =================================\n ====== PARTICIPANTE Nº: {promedio['nroParticipante']} ======\n =================================\n Disparos: {promedio['disparos']},\n Nombre: {promedio['nombre']},\n Apellido: {promedio['apellido']},\n Promedio: {promedio['promedio']}\n =================================\n =================================\n \"\"\"\n )", "def getVotersToDial(self):\n return self.getVotersToContact().exclude(\n (Q(phone_number1='') | Q(wrong_phone_number1__gt=1)),\n (Q(phone_number2='') | Q(wrong_phone_number2__gt=1)))", "def carac_reproducciones(caracteristica, valor_min, valor_max, catalog):\n artistasNoRepetidos = lt.newList('ARRAY_LIST')\n artistasRepetidos = lt.newList('ARRAY_LIST')\n MapCaracteristicas = mp.get(catalog['caraContenido'], caracteristica)\n RBTcaracteristica = me.getValue(MapCaracteristicas)\n lista_listas_musica = om.values(RBTcaracteristica, valor_min, valor_max)\n lista_lista_musica = it.newIterator(lista_listas_musica)\n while it.hasNext(lista_lista_musica): \n lista_musica = it.next(lista_lista_musica)#lista_musica es un dicc de listas que tengo que recorrer \n musicas = it.newIterator(lista_musica)\n while it.hasNext(musicas):\n musica = it.next(musicas) #iterar sobre esta lista por artist_id\n if int(lt.isPresent(artistasNoRepetidos, (musica['artist_id']))) == 0:\n lt.addLast(artistasNoRepetidos, musica['artist_id'])\n if int(lt.isPresent(artistasRepetidos, (musica['created_at'] + musica['user_id'] + musica['track_id']))) == 0:\n lt.addLast(artistasRepetidos, (musica['created_at'] + musica['user_id'] + musica['track_id']))\n else:\n if int(lt.isPresent(artistasRepetidos, (musica['created_at'] + musica['user_id'] + musica['track_id']))) == 0:\n lt.addLast(artistasRepetidos, (musica['created_at'] + musica['user_id'] + musica['track_id']))\n return lt.size(artistasRepetidos), lt.size(artistasNoRepetidos)", "def list(self):", "def grande_partie(joueur1: object, joueur2: object,\n tableau_invisible_joueur1: list, tableau_invisible_joueur2: list,\n ):\n print(\"plateau du joueur 2 : \\n\")\n\n tour_de_jeu(joueur1, joueur2, tableau_invisible_joueur2)\n\n rafraichir_position(joueur2, joueur2.porte_avion, joueur2.torpilleur, joueur2.croiseur, joueur2.canonniere,\n joueur2.destroyer)\n verif_bateau(joueur1, joueur2.porte_avion, joueur2.torpilleur, joueur2.croiseur, joueur2.canonniere,\n joueur2.destroyer)\n\n print(\"plateau du joueur 1 : \\n\")\n tour_de_jeu(joueur2, joueur1, tableau_invisible_joueur1)\n\n rafraichir_position(joueur1, joueur1.porte_avion, joueur1.torpilleur, joueur1.croiseur, joueur1.canonniere,\n joueur1.destroyer)\n verif_bateau(joueur2, joueur1.porte_avion, joueur1.torpilleur, joueur1.croiseur, joueur1.canonniere,\n joueur1.destroyer)", "def pretraga_po_roku_trajanja(self, lst, datum):\n pretrazeno = []\n for i in lst:\n if i.rok_trajanja == datum:\n pretrazeno.append(i)\n return pretrazeno", "def Select_ligne(question,Fenetre_largeur,Fenetre_hauteur):\r\n t=\"None\"\r\n efface_tout()\r\n l=1\r\n while True:\r\n background(Fenetre_largeur,Fenetre_hauteur,1)\r\n texte(150,250,question)\r\n texte(410,250,l)\r\n mise_a_jour()\r\n efface_tout()\r\n t=t_fleche2(t)\r\n if t==\"Return\":\r\n break\r\n if t==\"Right\" and l<8:\r\n l+=1\r\n if t==\"Left\" and l>1:\r\n l-=1\r\n return l", "def 
conclusion_echantillon(self, liste_foetus):\n compteur = 0\n for lignes in range(1, len(liste_foetus)):\n if liste_foetus[lignes].contamination != 0 and liste_foetus[lignes].taux > self.seuil_taux_conta:\n compteur = compteur + 1\n if compteur > self.seuil_nbre_marqueurs:\n self.conclusion = 1\n else:\n self.conclusion = 0", "def separate_frontier(self):\n\t#print self.frontier\n region = [] # a list of tuples\n region_list = [] # a list of regions\n in_list = False\n region_size = 7\n num_regions = 25\n n = 0\n h = 0\n print \"separate frontier\"\n while(region_size>0):\n for i in range(len(self.frontier)):\n if (h < num_regions):\n region = []\n self.find_region(self.frontier[i], region)\n\t\t #rospy.loginfo(region)\n in_list = region in region_list\n if (len(region) > region_size) and (not in_list):\n region_list.append(region)\n h += 1\n self.regions = region_list\n region_size -= 1\n\t#print self.regions", "def mover_bm_derecha(self):\n self.nueva_posicion_posible_parte_superior = self.mapa.consultar_casilla_por_movimiento([self.casilla[0] + 1,\n self.casilla[1]],\n [self.vertice_2[0] + self.velocidad ,\n self.vertice_2[1]],\n [self.vertice_1[0] + 5, self.vertice_1[1]])\n self.nueva_posicion_posible_parte_inferior = self.mapa.consultar_casilla_por_movimiento([self.casilla[0] + 1,\n self.casilla[1] + 1],\n [self.vertice_4[0] + self.velocidad,\n self.vertice_4[1]],\n self.vertice_1)\n if self.nueva_posicion_posible_parte_superior[0] != 1 and self.nueva_posicion_posible_parte_inferior[0] != 1:\n self.x += self.velocidad * (self.x <= 655)\n self.posicion = [self.x,self.posicion[1]]\n self.casilla = [self.casilla[0] + self.nueva_posicion_posible_parte_superior[1], self.casilla[1]]\n self.redefinir_vertices()", "def relacher_arc(modele,dist,pred,fleches,texte,v1,v2):\n \n if dist[v1] + modele.longueur(v1,v2) < dist[v2] :\n dist[v2] = dist[v1] + modele.longueur(v1,v2)\n pred[v2] = v1\n if v2 in fleches :\n modele.delFleche(fleches[v2])\n modele.deltexte(texte[v2])\n fleches[v2] = modele.addFleche(v1,v2,\"Gray\")\n texte[v2] = modele.addTexte(v2,dist[v2])\n modele.observateur.update()\n return True \n return False", "def remonter_membre(self, nom_membre):\n membre = self.get_membre(nom_membre)\n indice = self.__membres.index(membre)\n if indice != 0: # ne fait rien si le membre est déjà tout en haut\n membre = self.__membres.pop(indice)\n self.__membres.insert(indice - 1, membre)", "def update_patients(self, list):\n\n self.llista.delete(0, tk.END)\n for i in range(len(list)):\n self.llista.insert(tk.END, list[i])\n self.llista.bind('<Double-1>', self.select_patient)", "def gen_moves_list(self,color='',dontCallIsAttacked=False):\n \n if(color==''):\n color=self.side2move\n mList=[]\n \n # For each 'piece' on the board (pos1 = 0 to 63)\n for pos1,piece in enumerate(self.cases):\n \n # Piece (or empty square) color is not the wanted ? 
pass\n if piece.couleur!=color:\n continue\n \n if(piece.nom=='ROI'): # KING\n mList+=piece.pos2_roi(pos1,self.oppColor(color),self,dontCallIsAttacked)\n continue\n \n elif(piece.nom=='DAME'): # QUEEN = ROOK + BISHOP moves !\n mList+=piece.pos2_tour(pos1,self.oppColor(color),self)\n mList+=piece.pos2_fou(pos1,self.oppColor(color),self)\n continue\n \n elif(piece.nom=='TOUR'): # ROOK\n mList+=piece.pos2_tour(pos1,self.oppColor(color),self)\n continue\n \n elif(piece.nom=='CAVALIER'): # KNIGHT\n mList+=piece.pos2_cavalier(pos1,self.oppColor(color),self)\n continue\n \n elif(piece.nom=='FOU'): # BISHOP\n mList+=piece.pos2_fou(pos1,self.oppColor(color),self)\n continue\n \n if(piece.nom=='PION'): # PAWN\n mList+=piece.pos2_pion(pos1,piece.couleur,self)\n continue\n \n return mList", "def create_conserved_motif_list_otherclass(gpcr_pdb,gpcr_aa,j,my_pos,motifs,multiple_chains,chain_name):\n my_pos_bw=my_pos.split(\"x\")[0]\n (my_aa,chain)=gpcr_aa[my_pos]\n add_chain_name=\"\"\n if multiple_chains:\n add_chain_name=\":\"+chain_name \n while j < len(motifs):\n cons_pos_bw = motifs[j][4]\n if my_pos_bw==cons_pos_bw:\n pos_range=find_range_from_cons_pos(my_pos, gpcr_pdb)\n if pos_range:\n motifs[j][2]=True\n motifs[j][3]=pos_range + add_chain_name\n j+=1", "def dislikes(self):\n return self.get_queryset().filter(vote__lt=0)", "def demarrer():\n\n global serpent\n global coordonnees_serpent\n global direction\n global score\n\n serpent = []\n coordonnees_serpent = []\n direction = 0\n score = 0\n\n serpent.extend(\n [TERRAIN.create_rectangle(10 * coté, 7 * coté, 10 * coté+ coté, 7 * coté + coté, fill = \"green\", outline = 'green'),\n TERRAIN.create_rectangle(10 * coté, 7 * coté, 10 * coté+ coté, 7 * coté + coté, fill = \"green\", outline = 'green'),\n TERRAIN.create_rectangle(10 * coté, 7 * coté, 10 * coté+ coté, 7 * coté + coté, fill = \"green\", outline = 'green')]\n )\n coordonnees_serpent=[(10,7),(10,7),(10,7)]\n mouvement()", "def align_tour_ids(self, tour_ids: list) -> list:\n n = self.size()\n \n # levels is list of number of movable cities for each city\n levels = np.zeros(len(tour_ids), dtype=np.uint8)\n for i in tour_ids:\n levels[i] = len(self.get_movable_city_ids(i))\n # print(levels)\n\n for i in range(1, n):\n # Find all posible city indices that (tour_ids[i-1])-th city are linked to\n list_ids = self.get_movable_city_ids(tour_ids[i-1])\n \n # Check if (tour_ids[i-1])-th city and (tour_ids[i])-th city are not linked\n if self.is_movable(tour_ids[i - 1], tour_ids[i]) == False:\n # print(tour_ids[i - 1], tour_ids[i])\n # print(list_ids)\n sub_list_ids = []\n # Loop over posible city indices to filter out connectable city at current time\n for j in list_ids:\n # Make sure j is not in previous part of tour_ids\n # Because i just want to change only the after part of tour_ids which is counted from i position\n if j not in tour_ids[0:i]: \n sub_list_ids.append(j)\n # print(sub_list_ids)\n\n # If sub_list_ids is empty, we drop this tour\n if len(sub_list_ids) == 0:\n return None\n\n # Find argmin of levels\n j_min_in_slice = np.argmin(levels[sub_list_ids])\n j_min = sub_list_ids[j_min_in_slice]\n # print(j_min) \n \n idx_of_j = tour_ids.index(j_min) # find position of j_min value in tour_ids\n # swap two postions\n tour_ids[i], tour_ids[idx_of_j] = j_min, tour_ids[i]\n\n # print(tour_ids)\n \n # Set level of (tour_ids[i-1])-th city is equal 0 when it satisfied the constraint\n levels[tour_ids[i-1]] = 0\n # Update levels of cities has connect with (tour_ids[i-1])-th city:\n for j in 
list_ids:\n # Avoid levels of city is already satisfied\n if levels[j] > 0:\n levels[j] -= 1\n # print(levels)\n\n return tour_ids", "def lefthide(self):\n self._hideitemlist(ITEMLIST_LEFT)", "def _libre(carte):\n\n cases_libres = deque()\n c_app = cases_libres.append\n for idx in range(UNITE + (UNITE // 2), (LARGEUR - UNITE), UNITE):\n for idy in range(UNITE + (UNITE // 2), (HAUTEUR - UNITE), UNITE):\n if not carte[idy // UNITE][idx // UNITE]:\n c_app((idx, idy))\n\n return cases_libres" ]
[ "0.50994366", "0.4944012", "0.4894424", "0.4877875", "0.4851604", "0.48309383", "0.48296744", "0.47853267", "0.47807354", "0.4748723", "0.47304198", "0.47265714", "0.46883455", "0.4688048", "0.46656272", "0.46631646", "0.465828", "0.4652967", "0.46512303", "0.4631358", "0.4613161", "0.46024942", "0.45816767", "0.45653844", "0.45591402", "0.45501179", "0.4548874", "0.45252314", "0.4510275", "0.45084003", "0.44918716", "0.4478985", "0.44646376", "0.4463184", "0.4457557", "0.44483414", "0.4447355", "0.44420433", "0.44203955", "0.44192988", "0.4402393", "0.4397355", "0.43958402", "0.43838772", "0.43656188", "0.43621767", "0.4359229", "0.43583503", "0.4355865", "0.43554956", "0.43498737", "0.4348964", "0.43459767", "0.4345391", "0.4341491", "0.4339319", "0.43362615", "0.433197", "0.43317991", "0.43298748", "0.43286577", "0.43256098", "0.4324518", "0.43229264", "0.43227193", "0.43218163", "0.4319902", "0.43092135", "0.4306396", "0.42912287", "0.42876807", "0.4286479", "0.4285913", "0.4285671", "0.42814204", "0.42716673", "0.42701364", "0.42680115", "0.42652908", "0.426197", "0.42583734", "0.42557853", "0.42547634", "0.42533267", "0.42422953", "0.42380816", "0.42378712", "0.42378452", "0.42353258", "0.42343825", "0.4231419", "0.422643", "0.42226353", "0.4216426", "0.42158377", "0.42125687", "0.4211528", "0.42107448", "0.42079088", "0.42053285", "0.41989702" ]
0.0
-1
Fonction qui prend en argument un disque Qui renvoie tous les disques en son contact direct ou non (le contact du contact du contact ... est renvoye) et qui se trouve derriere v
def T(v,securite):
    to_return = {} #renvoie le dictionnaire {indice du contact (0 -> direct / sinon -> plus ou moins direct) : set({disque})}
    Cv = set(C(v,securite))
    Tv = set(Cv)
    i=0
    xv,yv=l[v][0],l[v][1]
    while Cv != set() and i<5:
        to_return[str(i)]=Cv
        new_Cv = set()
        for j in Cv:
            xj,yj=l[j][0],l[j][1]
            #si j est devant v, on ne le copte pas
            if sqrt((xj-xt)**2+(yj-yt)**2)<sqrt((xv-xt)**2+(yv-yt)**2):
                continue
            new_Cv= new_Cv.__or__(C(j,securite).__sub__(Tv.__or__(set(j).__or__({v}))))
        Tv = Tv.__or__(new_Cv)
        Cv = new_Cv
        i+=1
    return to_return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def analyse_donnees(self, mere, foetus, pere, log):\n concordance_mf = 0\n concordance_pf = None\n if len(pere) != 0:\n concordance_pf = 0\n log = log + \"Père détecté.................................\\n\"\n log = log + \"\\n\\nVérification concordance des ADNs entre père et foetus..............................\\n\"\n for Alleles in range(len(foetus)):\n for Allele_Foe in range(3):\n if foetus[Alleles].allele[Allele_Foe] in pere[Alleles].allele:\n if foetus[Alleles].allele[Allele_Foe] != 0.0:\n pere[Alleles].concordance_pere_foetus = \"OUI\"\n concordance_pf = concordance_pf + 1\n log = log + \"Concordance pour marqueur \" + str(\n foetus[Alleles].marqueur) + \" OK..................\\n\"\n break\n else:\n pere[Alleles].concordance_pere_foetus = \"NON\"\n log = log + \"Concordance pour marqueur \" + foetus[\n Alleles].marqueur + \" PAS OK..............\\n\"\n break\n log = log + \"\\n\\nVérification concordance des ADNs entre mère et foetus..............................\\n\"\n for Alleles in range(len(foetus)):\n for Allele_Foe in range(3):\n if foetus[Alleles].allele[Allele_Foe] in mere[Alleles].allele:\n if foetus[Alleles].allele[Allele_Foe] != 0.0:\n foetus[Alleles].concordance_mere_foetus = \"OUI\"\n concordance_mf = concordance_mf + 1\n log = log + \"Concordance pour marqueur \" + str(\n foetus[Alleles].marqueur) + \" OK..................\\n\"\n break\n else:\n foetus[Alleles].concordance_mere_foetus = \"NON\"\n log = log + \"Concordance pour marqueur \" + foetus[Alleles].marqueur + \" PAS OK..............\\n\"\n break\n log = log + \"Vérification concordance des ADns terminée..................................\\n\\n\\n\"\n if concordance_mf != len(foetus):\n resultats, conclusion = self.resultat(concordance_mf, concordance_pf, foetus, mere, pere)\n log = log + \"Concordance des ADNs PAS OK....................\\n\"\n log = log + \"Erreur dans l'échantillon...................\\n\"\n log = log + \"Revérifier s'il vous plaît.............\\n\"\n return resultats, conclusion, log\n else:\n log = log + \"Traitement des 15 autres marqueurs..............................\\n\"\n for nbre_lignes in range(1, len(mere)):\n log = log + \"Traitement du marqueur \" + str(foetus[nbre_lignes].marqueur) + \"..........\\n\"\n pic = foetus[nbre_lignes].foetus_pics()\n log = log + \"Calcul du nombre d'allèles pour le foetus......................\\n\"\n log = log + \"Nombre d'allèles pour le foetus : \" + str(pic) + \".........\\n\"\n log = log + \"Vérification de l'homozygotie de la mère......................\\n\"\n mere[nbre_lignes].homozygotie()\n log = log + \"Mère homozygote : \" + str(mere[nbre_lignes].homozygote) + \"...............\\n\"\n log = log + \"Vérification mère et foetus mêmes allèles......................\\n\"\n foetus[nbre_lignes].allele_semblable(mere[nbre_lignes])\n log = log + \"Code de retour vérification allèles semblables: \" + str(\n foetus[nbre_lignes].informatif) + \"...............\\n\"\n log = log + \"Initialisation du taux de contamination pour calcul à venir...............\\n\"\n foetus[nbre_lignes].taux = 0.0\n log = log + \"Taux initialisé.................................\\n\"\n log = log + \"Si code informatif de retour allèles semblables différent de 2, vérification écho.............\\n\"\n log = log + \"Si écho, affection code informatif 3...............\\n\"\n if foetus[nbre_lignes].informatif != 2:\n log = log + \"Vérification si écho......................\\n\"\n mere[nbre_lignes].echo(foetus[nbre_lignes])\n log = log + \"Code retour vérification écho : \" + str(\n 
foetus[nbre_lignes].informatif) + \"...............\\n\"\n log = log + \"Début chaîne de traitement...........................\\n\"\n if pic == 3:\n log = log + \"Trois allèles détectés......................\\n\"\n foetus[nbre_lignes].contamination_heterozygote(mere[nbre_lignes])\n log = log + \"Marqueur informatif, affectation du code contamination 1..............\\n\"\n foetus[nbre_lignes].informatif = 1\n log = log + \"Calcul taux de contamination du marqueur..........\\n\"\n foetus[nbre_lignes].contamination = 2\n log = log + \"Calcul terminé....................\\n\"\n elif mere[nbre_lignes].homozygote:\n log = log + \"Mère homozygote.......................\\n\"\n log = log + \"Marqueur non informatif, affectation du code informatif 0............\\n\"\n foetus[nbre_lignes].informatif = 0\n elif pic == 2:\n log = log + \"Deux allèles détectés..............\\n\"\n if foetus[nbre_lignes].informatif == 2:\n log = log + \"Si mêmes allèles, vérification homozygote contaminé...............\\n\"\n foetus[nbre_lignes].verif_homozygote_contamine(self)\n if foetus[nbre_lignes].contamination == 1:\n log = log + \"Homozygote contaminé identifié.....................\\n\"\n log = log + \"Calcul du taux de contamination....................\\n\"\n foetus[nbre_lignes].homozygote_contamine(self)\n log = log + \"Calcul du taux de contamination effectué...........\\n\"\n else:\n if foetus[nbre_lignes].informatif != 3:\n log = log + \"Code calcul écho différent de 3..................\\n\"\n log = log + \"Marqueur informatif, affectation du code informatif 1.............\\n\"\n foetus[nbre_lignes].informatif = 1\n log = log + \"Marqueur non contaminé, affectation du code contamination 0................\\n\"\n foetus[nbre_lignes].contamination = 0\n else:\n log = log + \"Un seul allèle détecté............\\n\"\n if foetus[nbre_lignes].informatif != 3:\n log = log + \"Code informatif différent de 3...........\\n\"\n log = log + \"Marqueur informatif, affectation du code informatif 1.............\\n\"\n foetus[nbre_lignes].informatif = 1\n log = log + \"Marqueur non contaminé, affectation du code contamination 0................\\n\"\n foetus[nbre_lignes].contamination = 0\n log = log + \"\\n\\n\"\n log = log + \"Calcul échantillon contaminé ou non......\\n\"\n log = log + \"Marqueur contaminé si >\" + str(self.seuil_taux_conta) + \".......\\n\"\n log = log + \"Echantillon contaminé si plus de \" + str(\n self.seuil_nbre_marqueurs) + \"marqueurs contaminés...\\n\"\n self.conclusion_echantillon(foetus)\n log = log + \"Calcul échantillon terminé.....\\n\"\n log = log + \"Fin de traitement...........\\n\"\n resultats, conclusion = self.resultat(concordance_mf, concordance_pf, foetus, mere, pere)\n return resultats, conclusion, log", "def enchere(self):\n\n i = 0\n while i < 5 and self.annonce < 4:\n paroleJ = self.joueurs[i].parler(self.annonce)\n if paroleJ != 0:\n self.annonce = paroleJ\n self.indiceJoueurQuiPrend = i\n i += 1\n\n print(\"joueur qui prend : \" + str(self.indiceJoueurQuiPrend))\n if self.indiceJoueurQuiPrend != -1:\n print(\"annonce : \" + str(self.annonce))\n if self.annonce == 1 or self.annonce == 2:\n self.joueurs[self.indiceJoueurQuiPrend].possedeChien = True\n self.joueurs[self.indiceJoueurQuiPrend].construireChien()\n self.debuterPartie()\n\n else:\n self.finirPartie()", "def crier_ordres(self, personnage):\n msg = \"{} s'écrie : rameurs, laissez courir !\".format(\n personnage.distinction_audible)\n self.navire.envoyer(msg)", "def crier_ordres(self, personnage):\n adverse = self.adverse\n 
msg = \"{} s'écrie : un boulet sur {} !\".format(\n personnage.distinction_audible, adverse.desc_survol)\n self.navire.envoyer(msg)", "def envoi_par_mail(self):\n cr , uid, context = self.env.args\n if not self.pool['res.users'].has_group(cr, uid, 'is_plastigray.is_comptable_group'):\n raise Warning(u\"Accès non autorisé !\")\n ids=[]\n for obj in self:\n ids.append(str(obj.id))\n if len(ids)>0:\n SQL=\"\"\"\n select ai.is_mode_envoi_facture, ai.partner_id, ai.name, ai.id\n from account_invoice ai\n where \n ai.id in(\"\"\"+','.join(ids)+\"\"\") and \n ai.is_date_envoi_mail is null and \n ai.is_mode_envoi_facture like 'mail%'\n order by ai.is_mode_envoi_facture, ai.partner_id, ai.name\n \"\"\"\n cr.execute(SQL)\n result = cr.fetchall()\n\n # ** Un mail par client*********************************************\n partners={}\n for row in result:\n if row[0]=='mail_client':\n partner_id = row[1]\n id = row[3]\n if not partner_id in partners:\n partners[partner_id]=[]\n partners[partner_id].append(id)\n #*******************************************************************\n\n\n # ** Un mail+BL par client******************************************\n for row in result:\n if row[0]=='mail_client_bl':\n partner_id = row[1]\n id = row[3]\n if not partner_id in partners:\n partners[partner_id]=[]\n partners[partner_id].append(id)\n #*******************************************************************\n\n\n #** Envoi des mails par partner ************************************\n for partner_id in partners:\n ids=partners[partner_id]\n self._envoi_par_mail(partner_id, ids)\n #*******************************************************************\n\n\n # ** Un mail par facture *******************************************\n for row in result:\n if row[0] in ['mail', 'mail_regroupe_bl']:\n partner_id = row[1]\n id = row[3]\n self._envoi_par_mail(partner_id, [id])\n #*******************************************************************\n\n\n # ** Un mail par facture en double exemplaire **********************\n for row in result:\n if row[0]=='mail2':\n partner_id = row[1]\n id = row[3]\n self._envoi_par_mail(partner_id, [id])\n #*******************************************************************", "def modifier_classement_joueur_tournoi(self, joueurs_tournoi, championnat, rapport):\r\n rapport.affichage_classement_championnat(championnat)\r\n championnat = sorted(championnat, key=lambda x: x.classement) # tri joueurs du championnat par classement\r\n print(\"Veuillez indiquer le numéro du joueur à modifier:\")\r\n choix = int(input())\r\n if choix <= len(championnat): # test si choix numero joueur valide\r\n index = choix - 1 # car liste commence a 0\r\n joueur = championnat[index]\r\n nouveau_joueur = copy.deepcopy(joueur)\r\n print(\"Veuillez indiquer le nouveau classement de \" + joueur.nom)\r\n nouveau_classement = int(input())\r\n nouveau_joueur.classement = nouveau_classement\r\n championnat.remove(joueur) # enleve ancienne position du joueur dans classement\r\n joueurs_tournoi.remove(joueur) # enleve ancienne position du joueur dans tournoi\r\n championnat.append(nouveau_joueur) # ajoute joueur avec classement actualise\r\n joueurs_tournoi.append(nouveau_joueur) # ajoute joueur classement actualise dans liste participants tournoi\r\n return joueurs_tournoi, championnat\r\n else:\r\n print(\"Numero joueur invalide\")\r\n return", "def ir(self):\n if not self._chamados:\n self._andar = 0\n else:\n super().ir() # metodo irado de falar q ta usando function base.", "def afficher(self, personnage, jeu, partie):\n 
en_main = jeu.en_main.get(personnage)\n tableau = jeu.tableau\n if en_main:\n msg = \"Dans votre main, vous avez {} et {}.\".format(\n en_main[0].nom_complet_indefini,\n en_main[1].nom_complet_indefini)\n else:\n msg = \"Vous n'avez encore rien dans votre main.\"\n \n if tableau:\n tableau = [piece.nom_complet_indefini for piece in tableau]\n aff_tableau = \", \".join(tableau[:-1]) + \" et \" + tableau[-1]\n msg += \"\\nSur le tableau se trouve {}.\".format(aff_tableau)\n\n if partie.tour is personnage:\n msg += \"\\nC'est votre tour.\"\n \n return msg", "def es_satisfecho_por(self, candidata):", "def adelanta_camion(self, entorno, operacion, medio_de_origen_o_destino, camion, tipo):\r\n # TODO mejorar implementacion\r\n\r\n if tipo == \"Operacion\":\r\n\r\n operacion.recurso.cola.remove(camion)\r\n operacion.recurso.cola = \\\r\n operacion.recurso.cola[0:operacion.recurso.cola.index(self)] \\\r\n + [camion] + operacion.recurso.cola[operacion.recurso.cola.index(self):]\r\n\r\n print str(camion) + \" adelantado bajo criterio de \" + str(self) + \" \" + str(entorno.now)\r\n print \"\\tEn sistema: \" + str(operacion.recurso.cola) + \" Hora: \" + str(entorno.now)\r\n\r\n elif tipo == \"Almacen\":\r\n\r\n medio_de_origen_o_destino.cola.remove(camion)\r\n medio_de_origen_o_destino.cola = \\\r\n medio_de_origen_o_destino.cola[0:medio_de_origen_o_destino.cola.index(self)] \\\r\n + [camion] + medio_de_origen_o_destino.cola[medio_de_origen_o_destino.cola.index(self):]\r\n\r\n print str(camion) + \" adelantado bajo criterio de \" + str(self) + \" \" + str(entorno.now)\r\n print \"\\t\" + medio_de_origen_o_destino.nombre + \":\" \\\r\n + str(medio_de_origen_o_destino.cola) + \" Hora: \" + str(entorno.now)", "def afficher(self):\n bordRect = (self.pos[0]-5, self.pos[1]-5, self.dim[0]+5, self.dim[1]+5)\n Fond = pygame.draw.rect(self.ecran.surface, self.ecran.couleur, bordRect, 0) # Efface le precedant text\n\n rang = 0\n verif = \"\"\n compteur = 0\n self.lignes = []\n if self.txt == \"\": self.txt = \" \"\n \n while verif != self.txt:\n verif =\"\"\n rang += self.correction(self.txt[rang:], compteur)\n compteur += 1\n for k in self.lignes:\n verif += k.txt\n\n for compteur in range(len(self.lignes)):\n self.lignes[compteur].afficher()\n\n self.dim = (self.dim[0], self.hLigne*(compteur+1)) # +1 -> Boucle for\n \n pygame.display.flip()", "def __busca_notas(self, tipo_busca, intervalo_inicial, intervalo_final, serie, \r\n\t\t chave_acesso):\r\n info_consulta = \"11\" # padrao 1\r\n\tresposta = \" \"*231 # padrao 230\r\n\t#resposta = None \r\n\r\n status = self.dll.rRetornarInformacao_NFCe_Daruma(tipo_busca, \r\n intervalo_inicial, intervalo_final, serie, chave_acesso, \r\n\t info_consulta, resposta) \r\n\tif status !=1:\r\n\t if status == -1:\r\n\t\traise Exception(\"-1: Erro encontrado na execucao do metodo\")\r\n elif status == -2:\r\n\t\traise Exception(\"-2: Chave Invalida\")\r\n\t elif status == -3:\r\n\t\traise Exception(\"-3: Falha no schema XML.\")\r\n\t elif status == -4:\r\n\t\traise Exception(\"-4: XML fora do padrao\")\r\n\t elif status == -5:\r\n\t\traise Exception(\"-5: Erro generico\")\r\n\t elif status == -8:\r\n\t\traise Exception(\"-8: Usuario nao Autorizado\")\r\n elif status == -9:\r\n\t\traise Exception(\"-9: Usuario nao Licenciado\")\r\n\t elif status == -10:\r\n\t\traise Exception(\"-10: Documento e Ambiente nao identificados\")\r\n\t elif status == -13:\r\n\t\traise Exception(\"-13: Tipo de Documento nao identificado\")\r\n elif status == -14:\r\n\t\traise Exception(\"-14: Erro 
retornado pelo WebService.\")\r\n elif status == -52:\r\n\t\traise Exception(\"-52: Erro ao gravar em arquivo temporario\")\r\n elif status == -99:\r\n\t\traise Exception(\"-99: Parametros invalidos ou ponteiro nulo de pametros\")\r\n elif status == -99:\r\n\t\traise Exception(\"-103: Nao foram encontradas as DLLs auxiliaes\")\r\n\t else:\r\n\t\traise Exception(\"Erro ao executar o metodo Retornar Informacao.\")", "def findSommetsConnexeTo(self, origine, notVisited):\r\n notVisited.remove(origine) # on retire le sommet des non visités\r\n # print(self.adjMatrix)\r\n for voisin, weight in enumerate(self.adjMatrix[origine]): # Pour chaque voisin de ce point\r\n if weight !=0 and voisin in notVisited: # On y est connecté et on ne l'a pas encore vu\r\n self.findSommetsConnexeTo(voisin, notVisited) # On répète le processus pour ce point\r", "def joueCoup(position,coup):\n nouvelle_pos = clonePosition(position) # on duplique pour ne pas modifier l'original\n n = nouvelle_pos['taille']\n trait = nouvelle_pos['trait']\n # on transforme coup en indice\n if trait == 'SUD':\n indice_depart = coup-1\n else:\n indice_depart = 2*n-coup\n # retrait des graines de la case de depart\n nbGraines = nouvelle_pos['tablier'][indice_depart]\n nouvelle_pos['tablier'][indice_depart] = 0\n # on seme les graines dans les cases a partir de celle de depart\n indice_courant = indice_depart\n while nbGraines > 0:\n indice_courant = (indice_courant + 1) % (2*n)\n if (indice_courant != indice_depart): # si ce n'est pas la case de depart\n nouvelle_pos['tablier'][indice_courant] += 1 # on seme une graine\n nbGraines -= 1\n # la case d'arrivee est dans le camp ennemi ?\n if (trait == 'NORD'):\n estChezEnnemi = (indice_courant < n)\n else:\n estChezEnnemi = (indice_courant >= n)\n # realisation des prises eventuelles\n while estChezEnnemi and (nouvelle_pos['tablier'][indice_courant] in range(2,4)):\n nouvelle_pos['graines'][trait] += nouvelle_pos['tablier'][indice_courant]\n nouvelle_pos['tablier'][indice_courant] = 0\n indice_courant = (indice_courant - 1) % (2*n)\n if (trait == 'NORD'):\n estChezEnnemi = (indice_courant < n)\n else:\n estChezEnnemi = (indice_courant >= n)\n # mise a jour du camp au trait\n if trait == 'SUD':\n nouvelle_pos['trait'] = 'NORD'\n else:\n nouvelle_pos['trait'] = 'SUD'\n return nouvelle_pos", "def control_manteniment_comunicacio(data, sock, address, equips, dades_serv):\n for equip in equips:\n if equip['nom'].__eq__(data[1:6]) and equip['mac'].__eq__(data[8:20]) and equip['estat'].__eq__('DISCONNECTED'):\n enviar_alive_rej(sock, address, 'Equip no registrat al sistema.')\n elif equip['nom'].__eq__(data[1:6]) and equip['mac'].__eq__(data[8:20]) and equip['aleatori'].__eq__(data[21:27]) and equip['address'].__eq__(address):\n if equip['estat'].__eq__('REGISTERED'):\n equip['cont_alives'] = 0\n equip['estat'] = 'ALIVE'\n print_with_time('MSG. 
=> L\\'equip ' + equip['nom'] + ' passa de REGISTERED a ALIVE.')\n enviar_alive_ack(sock, address, dades_serv, equip)\n elif equip['estat'].__eq__('ALIVE'):\n equip['cont_alives'] = 0\n enviar_alive_ack(sock, address, dades_serv, equip)\n elif equip['nom'].__eq__(data[1:6]) and equip['mac'].__eq__(data[8:20]) and not equip['address'].__eq__(address) \\\n and equip['aleatori'].__eq__(data[21:27]):\n enviar_alive_nack(sock, address, 'Discrepancies amb IP address')\n elif equip['nom'].__eq__(data[1:6]) and equip['mac'].__eq__(data[8:20]) and equip['address'].__eq__(address) \\\n and not equip['aleatori'].__eq__(data[21:27]):\n enviar_alive_nack(sock, address, 'Discrepancies amb el nombre aleatori')", "def modification_de_couleur(var,y):\n for i in range(len(Couleurs)): #Permet de savoir à quel valeur\n if L[i] == var: #de la liste la personne est en\n break #arrivant.\n while True:\n rectangle(410,y,437,y+17,'black','#A9A9A9')\n rectangle(410,y+23,437,y+40,'black','#A9A9A9')\n fleche(424,y+20,424,y+5,'black',2)\n fleche(424,y+30,424,y+35,'black',2)\n x2,y2,z2=attente_clic()\n if 410<=x2<=437:\n if y<=y2<=y+17:\n i += 1\n elif y+23<=y2<=y+40:\n i -=1\n else:\n return Couleurs[i],Couleurs2[i]\n if i >= 12:\n i = 0\n if i < 0:\n i = 11\n cercle(100,120,10,Couleurs[i],Couleurs2[i])\n cercle(120,120,10,Couleurs[i],Couleurs2[i])\n cercle(140,120,10,Couleurs[i],Couleurs2[i])", "def afficheRep(question):\r\n reponse = input(\"Quel est votre réponse?\") #permet de rentrer sa reponse\r\n if testReponse(question,reponse) == True:\r\n print(\"Bonne réponse!\")\r\n else:\r\n print(\"FAUX!\")\r\n print(question[6])\r\n return reponse", "def scraper_notizie(self, contenuto_articoli: list):\n tot_menzioni = []\n for articolo in contenuto_articoli:\n # estraggo qualsisasi frase che menziona il giocatore\n sel_regex = f\"[\\w ,;()'’-]+{self.name}[\\w ,;()'’-]+\"\n results = re.findall(sel_regex, articolo)\n\n for res in results:\n # rimuovo il caso in cui sia solo in un elenco, come ad inizio articoli su ATTACCO\n if not re.search(f\", {self.name},\", res):\n tot_menzioni.append(res)\n if len(tot_menzioni) > 0:\n self.news = \"• \" + \"<br>•\".join(tot_menzioni)", "def replace_by_canonical_referent(self, cross_sentence=True):\n replaced_count = 0\n\n # replace the participant in their containers\n def replace_in_containers(myobj, replacing):\n containers = {\n \"event\": \"participants\",\n \"sentiment_expression\": \"targets\",\n \"sentence\": \"participants\",\n }\n for c_name, attrib_n in containers.items():\n c_name = f\"in_{c_name}\"\n if hasattr(myobj, c_name):\n for c in getattr(myobj, c_name):\n to_replace_in = getattr(c, attrib_n)\n to_replace_in.append(replacing)\n if myobj in to_replace_in:\n to_replace_in.remove(myobj)\n to_replace_in.sort(key=lambda x: x.begin)\n\n if self.canonical_referents and self.canonical_referents != \"from_canonref\":\n # sometimes there are multiple canonrefs tagged\n # this can be a) annotation mistake or\n # b) multiple reference to a group, e.g. 
\"all\" refers to three companies.\n for canonref in self.canonical_referents:\n # check whether canonical referent is in same sentence\n same_sentence = [s.element_id for s in self.in_sentence] == [\n s.element_id for s in canonref.in_sentence\n ]\n # always replace when cross_sentence is true\n # if cross_sentence is False (disallowed) only replace when canonref is in same sentence\n if cross_sentence or same_sentence:\n # replace element\n replacing_participant = Participant(\n canonref.text,\n canonref.begin,\n canonref.end,\n canonref.element_id,\n canonref.annotator_id,\n canonref.document_title,\n canonref.in_document,\n self.role,\n \"from_canonref\",\n self.link_id,\n canonref.tokens,\n )\n replacing_participant.in_sentence = self.in_sentence\n replacing_participant.in_document = self.in_document\n if hasattr(self, \"in_sentiment_expression\"):\n replacing_participant.in_sentiment_expression = (\n self.in_sentiment_expression\n )\n replacing_participant.from_original_participant = copy(self)\n print(\n f\"Replaced {self} with {canonref}. (in same sentence:{same_sentence})\"\n )\n\n replace_in_containers(self, replacing_participant)\n\n # replace on document\n self.in_document.participants.append(replacing_participant)\n if self in self.in_document.participants:\n self.in_document.participants.remove(self)\n\n replaced_count += 1\n return replaced_count", "def cobroNC(self):\n if self.total_a_pagar == 0:\n QtGui.QMessageBox.information(self,\"Aviso\",\"El saldo restante a pagar es cero\")\n else:\n self.rbtnNC.setChecked(True)\n totalFactura = self.total_a_pagar\n numero,ok = QtGui.QInputDialog.getText(self,\"Cobro c/Nota de Crédito\",\"Ingrese número de Nota de Crédito\")\n if ok:\n notaCredito = NotaCreditoModel.getNotaCredito(self.padre.sesion,int(numero))\n if notaCredito == None:\n QtGui.QMessageBox.information(self,\"Aviso\",\"La Nota de Crédito ingresada no existe\")\n elif notaCredito.getTotal(self.padre.sesion) < totalFactura:\n QtGui.QMessageBox.information(self,\"Aviso\",\"El monto de la Nota de Credito es insuficiente\")\n elif notaCredito.getTotal(self.padre.sesion) - CobroClienteModel.getTotalNC(self.padre.sesion,notaCredito.numero) < totalFactura:\n dif = notaCredito.getTotal(self.padre.sesion) - CobroClienteModel.getTotalNC(self.padre.sesion,notaCredito.numero)\n QtGui.QMessageBox.information(self,\"Aviso\",\"La Nota solo posee $\" + str(dif))\n else:\n temp = [\"Nota de Crédito\",self.total_a_pagar,notaCredito.numero]\n self.detalles_cobro[self.tablePagos.rowCount()] = temp\n self.total_a_pagar = 0\n self.actualizar_total()\n self.actualizar_tabla()", "def getCambiosQafectanCaja(self, fechaInicio, fechaFin, usuarioColaborador=\"\"):\n\tif usuarioColaborador == \"\" and fechaInicio == \"\" and fechaFin == \"\":\n\t return self.conexion.ejecutarSQL(\"\"\"select c.id, c.fecha, c.hora, c.codigo_Producto_entra, c.codigo_Producto_sale, c.id_Venta, c.excedente, c.usuario_Colaborador\n from cambios c, ventas v\n where c.id_Venta = v.id\n and c.fecha != v.fecha\"\"\")\n elif usuarioColaborador == \"\":\n return self.conexion.ejecutarSQL(\"\"\"select c.id, c.fecha, c.hora, c.codigo_Producto_entra, c.codigo_Producto_sale, c.id_Venta, c.excedente, c.usuario_Colaborador\n from cambios c, ventas v\n where c.id_Venta = v.id\n and c.fecha != v.fecha\n and c.fecha between '%s' and '%s'\"\"\" %(fechaInicio,fechaFin))\n else:\n return self.conexion.ejecutarSQL(\"\"\"select c.id, c.fecha, c.hora, c.codigo_Producto_entra, c.codigo_Producto_sale, c.id_Venta, c.excedente, 
c.usuario_Colaborador\n from cambios c, ventas v\n where c.id_Venta = v.id\n and c.fecha != v.fecha\n and c.fecha between '%s' and '%s'\n and c.usuario_Colaborador = '%s'\"\"\" %(fechaInicio,fechaFin,usuarioColaborador))", "def atender(self):\n\n if self.enfila>0: #Para que atiendan solamente si e que hay alguien en la fila\n\n self.enfila-=1\n self.fila.pop(0) #Saco primer elemento de la fila (Atienden al primer cliente)", "def rafraichir_position(adversaire: object, *arg):\n for nom_du_bateau in arg:\n for elements in range(nom_du_bateau.taille_bateau):\n col = nom_du_bateau.coordonnees_bateau[elements][1]\n rangee = nom_du_bateau.coordonnees_bateau[elements][0]\n if adversaire.plateau_joueur.tableau[rangee][col] == \"@\":\n nom_du_bateau.coordonnees_bateau[elements][2] = \"@\"", "def busca_por_chave(self, chave_acesso, serie):\r\n\tnnf_inicial = \"\"\r\n nnf_final = \"\"\r\n self.__busca_notas(\"CHAVE\", nnf_inicial, nnf_final, serie, chave_acesso)", "def question(dico):\n l = []\n for i in range(len(dico)):\n l.append(dico[i][0])\n affichage_question(dico,l)", "def entre_primeros_cola_recurso(self, recurso):\r\n\r\n if self in recurso.cola[0:recurso.capacity]:\r\n return True\r\n else:\r\n return False", "def isspeech(phone):\n return phone not in OTHERS", "def cliquer_sur_unité(self):", "def affichage_creation_tournoi():\n nom = \"\"\n lieu = \"\"\n date = \"\"\n nb_tours = 4\n joueurs = []\n temps = \"\"\n note = \"\"\n\n print(\"\\n---------------------------\")\n while len(nom) == 0:\n try:\n nom = str(input(\"\\nNom : \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi un nom valide.\")\n sl(2)\n continue\n\n print(\"\\n---------------------------\")\n while len(lieu) == 0:\n try:\n lieu = str(input(\"\\nLieu : \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi un lieu valide.\")\n sl(2)\n continue\n\n print(\"\\n---------------------------\")\n while len(date) == 0:\n try:\n date = str(input(\"\\nDate\\nFormat : jj/mm/aaaa : \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi une date valide.\")\n sl(2)\n continue\n test_date = OutilsControleurs.test_date(date)\n if test_date == 0:\n print(\"\\nVous avez saisi une valeur trop grande.\")\n date = \"\"\n if test_date == 1:\n print(\"\\nVous avez saisi une valeur trop petite.\")\n date = \"\"\n if test_date == 2:\n break\n if test_date == 3:\n print(\"\\nVous avez saisi un format de date incorrect.\")\n date = \"\"\n\n print(\"\\n---------------------------\")\n nb_tours_modif = \"\"\n while nb_tours_modif != 2 or nb_tours_modif != 1:\n try:\n print(\"\\nNombre de tours\\nPar default le nombre est de 4\\nVoulez-vous modifier cette valeur ?\")\n nb_tours_modif = int(input(\"\\n1 - Oui\\n2 - Non\\n\\nVotre choix: \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi un nombre valide.\")\n sl(2)\n continue\n if nb_tours_modif == 1:\n while nb_tours == 4:\n try:\n nb_tours = int(input(\"\\nNombre de tours : \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi un nombre valide.\")\n sl(2)\n continue\n if nb_tours == 4:\n break\n break\n if nb_tours_modif == 2:\n break\n\n print(\"\\n---------------------------\\n\\nListe des joueurs :\\n\")\n liste_joueurs_tournois = Joueur.joueurs_tournoi()\n if liste_joueurs_tournois == 0:\n print(\"Il n'y a pas ou pas suffisament de joueurs pour organiser un tounois.\")\n print(\"Veuillez ajouter des joueurs via le menu.\")\n input(\"\\nAppuyer sur entrer pour continuer\")\n return\n\n for arg in liste_joueurs_tournois:\n print(arg)\n x = 8\n while x != 0:\n 
try:\n joueur = int(input(\"Saisir encore {} indice de joueurs : \".format(x)))\n except ValueError:\n print(\"\\nVous n'avez pas saisi un indice valide.\")\n sl(2)\n continue\n if joueur > 0 and joueur <= len(liste_joueurs_tournois):\n if joueur not in joueurs:\n joueurs.append(joueur)\n else:\n print(\"Vous avez deja saisi ce joueur.\")\n x += 1\n else:\n x += 1\n x -= 1\n\n y = 1\n nom_joueurs = []\n for arg in liste_joueurs_tournois:\n arg = arg[:-15]\n nom_joueurs.append(str(arg).replace(\"Indice joueur : {}\\n \".format(y), \"\").replace(\"\\n \", \"\"))\n y += 1\n joueurs = Joueur.get_joueurs_tournoi(joueurs, nom_joueurs)\n\n print(\"\\n---------------------------\")\n temps_choix = 0\n while temps_choix != 1 or temps_choix != 2 or temps_choix != 3:\n try:\n temps_choix = int(input(\"\\nContrôle de temps\\n1 - Bullet\\\n \\n2 - Blitz\\n3 - Coup rapide\\n\\nVotre choix : \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi une valeur valide.\")\n sl(2)\n continue\n if temps_choix == 1:\n temps = \"Bullet\"\n break\n if temps_choix == 2:\n temps = \"Blitz\"\n break\n if temps_choix == 3:\n temps = \"Coup rapide\"\n break\n\n print(\"\\n---------------------------\")\n while len(note) == 0:\n try:\n note = str(input(\"\\nDescription : \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi une valeur valide.\")\n sl(2)\n continue\n if len(note) == 0:\n break\n return nom, lieu, date, nb_tours, joueurs, temps, note", "def possessif(nom):\n\n CA = nom[1]\n\n\n rand = randint(0,5)\n\n if CA == \"-1\" or CA == \"-3\" or CA == \"-5\" or CA == \"-7\" or CA == \"-8\" or CA == \"-4\" or Premiere_lettre_voyelle(nom[0]):\n if rand == 0:\n return \"mon \"\n elif rand == 1:\n return \"ton \"\n elif rand == 2:\n return \"son \"\n elif rand == 3:\n return \"notre \"\n elif rand == 4:\n return \"votre \"\n elif rand == 5:\n return \"leur \"\n \n elif (CA == \"-2\" or CA == \"-6\" or CA == \"-9\"):\n if rand == 0:\n return \"ma \"\n elif rand == 1:\n return \"ta \"\n elif rand == 2:\n return \"sa \"\n elif rand == 3:\n return \"notre \"\n elif rand == 4:\n return \"votre \"\n elif rand == 5:\n return \"leur \"\n else:\n return False", "def atencion_ingreso(self, camion):\r\n\r\n operaciones = self.operaciones[\"Operaciones complementarias\"]\r\n\r\n if camion.tipo == \"Descarga\":\r\n yield self.process(operaciones[\"Atencion recepcion 1\"]\r\n .ejecutar(self, camion))\r\n else:\r\n yield self.process(operaciones[\"Atencion despacho 1\"]\r\n .ejecutar(self, camion))\r\n\r\n if camion.carga not in [\"Contenedor 20\", \"Contenedor 40\"] and \\\r\n not (camion.tipo == \"Carga\" and camion.carga == \"Harina de Soya - Hi Pro/Pellet de Soya\"):\r\n yield self.process(operaciones[\"Primer pesaje\"]\r\n .ejecutar(self, camion))\r\n self.exit(camion.nombre)", "def duplica(self, request, pk=None):\n preventivo_da_clonare = get_object_or_404(PreventivoFornitore, pk=pk)\n \n #print(\"preventivo vecchio: id={}, codice={}, data={}\".format(preventivo_da_clonare.id, preventivo_da_clonare.codice, preventivo_da_clonare.data))\n #for r in preventivo_da_clonare.righe.all():\n # print(\"riga vecchia: id={}, quantità={}, articolo={}, descrizione={}, cancellato={}\".format(r.id, r.quantita, r.articolo, r.articolo_descrizione, r.cancellato))\n\n preventivo_nuovo = get_object_or_404(PreventivoFornitore, pk=pk)\n # resettando l'id e salvando si crea un altro record che ha gli stessi campi...\n preventivo_nuovo.id = None\n preventivo_nuovo.save()\n preventivo_nuovo.data = date.today()\n preventivo_nuovo.codice = 
PreventivoFornitore.objects.next_codice()\n preventivo_nuovo.accettato = False\n preventivo_nuovo.save()\n \n #print(\"preventivo nuovo: id={}, codice={}\".format(preventivo_nuovo.id, preventivo_nuovo.codice))\n #print(\"preventivo nuovo: data={}\".format(preventivo_nuovo.data))\n for r in preventivo_da_clonare.righe.non_cancellati():\n rn = RigaPreventivoFornitore()\n rn.preventivo = preventivo_nuovo\n rn.articolo = r.articolo\n rn.articolo_descrizione = r.articolo_descrizione\n rn.articolo_prezzo = r.articolo_prezzo\n rn.sconto_percentuale = r.sconto_percentuale\n rn.articolo_unita_di_misura = r.articolo_unita_di_misura\n rn.accettata = False\n rn.quantita = r.quantita\n rn.totale = r.totale\n rn.note = r.note\n rn.save()\n preventivo_nuovo.aggiorna_totale()\n\n #for r in preventivo_nuovo.righe.all():\n # print(\"riga nuova: id={}, quantità={}, articolo={}, descrizione={}\".format(r.id, r.quantita, r.articolo, r.articolo_descrizione))\n\n serializer = PreventivoFornitoreSerializer(preventivo_nuovo)\n return Response(serializer.data, status=status.HTTP_201_CREATED)", "def tri_si_rencontre(self, joueurs_tries, liste_rencontres, nb_joueurs):\n # We recover the possibilities\n for x in joueurs_tries:\n liste_dict = []\n for y in joueurs_tries:\n if x == y:\n continue\n if (x, y) in liste_rencontres or (y, x) in liste_rencontres:\n continue\n else:\n liste_dict.append(y)\n self.dict_possiblity[x] = liste_dict\n copy_joueurs = list(joueurs_tries)\n liste_finale = []\n nb_tour = 0\n error = False\n while joueurs_tries:\n x = joueurs_tries[0]\n for y in joueurs_tries:\n if nb_tour > nb_joueurs**2:\n print(\"Il y a une erreur dans l'algorithme.\")\n error = True\n break\n if x == y:\n continue\n if (x, y) in liste_rencontres or (y, x) in liste_rencontres:\n nb_tour += 1\n continue\n else:\n i = 0\n # we are looking for a unique possibility\n for key in list(self.dict_possiblity):\n if len(self.dict_possiblity[key]) == 1:\n valeur = self.dict_possiblity[key][0]\n liste_finale.append((key, valeur))\n liste_rencontres.append((key, valeur))\n joueurs_tries.remove(key)\n joueurs_tries.remove(valeur)\n self.sup_dicti(valeur, key)\n i += 1\n break\n if i > 0:\n break\n # we remove both of the possibilities\n self.sup_dicti(x, y)\n liste_finale.append((x, y))\n liste_rencontres.append((x, y))\n joueurs_tries.remove(y)\n joueurs_tries.remove(x)\n break\n if error:\n liste_finale = Vue().demander_binomes(copy_joueurs,\n nb_joueurs)\n return liste_finale\n return liste_finale", "def mot_possible(mot:str,lettres:str)->bool:\r\n retour = False\r\n L=[]\r\n for i in lettres:\r\n if presente(i,mot)!=-1 :\r\n L.append(i)\r\n if len(L)>=len(mot):\r\n retour = True\r\n\r\n return(retour)", "def modifier_classement_joueur(self, championnat, rapport):\r\n rapport.affichage_classement_championnat(championnat)\r\n championnat = sorted(championnat, key=lambda x: x.classement) # tri joueurs du championnat par classement\r\n print(\"Veuillez indiquer le numéro du joueur à modifier:\")\r\n choix = int(input())\r\n if choix <= len(championnat): # test si choix numero joueur valide\r\n index = choix - 1 # car liste commence a 0\r\n joueur = championnat[index]\r\n nouveau_joueur = copy.deepcopy(joueur)\r\n print(\"Veuillez indiquer le nouveau classement de \" + joueur.nom)\r\n nouveau_classement = int(input())\r\n nouveau_joueur.classement = nouveau_classement\r\n championnat.remove(joueur) # enleve ancienne position du joueur dans classement\r\n championnat.append(nouveau_joueur) # ajoute joueur avec classement 
actualise\r\n return championnat\r\n else:\r\n print(\"Numero joueur invalide\")\r\n return", "def cliquer(self):\n self.nb_clic += 1\n self.message[\"text\"] = \"Vous avez cliqué {} fois.\".format(self.nb_clic)", "def fim_da_rodada(self, recompensa, m, numero_de_cacadores):\n #print('Jogador 4 {}'.format(self.historico[-1]))\n pass", "def conclusion_echantillon(self, liste_foetus):\n compteur = 0\n for lignes in range(1, len(liste_foetus)):\n if liste_foetus[lignes].contamination != 0 and liste_foetus[lignes].taux > self.seuil_taux_conta:\n compteur = compteur + 1\n if compteur > self.seuil_nbre_marqueurs:\n self.conclusion = 1\n else:\n self.conclusion = 0", "def get_only_wanted(self, datas_percorrer, index_data_1):\n \n # Dicionario de datas guardadas, chamando a function\n # OrderedDict() que lembra a ordem de cada item\n datas_guardadas = OrderedDict()\n \n # Sub_dicionario para datas/ano, chamando a function\n # OrderedDict() que lembra a ordem de cada item\n datas_guardadas_ano = OrderedDict()\n\n # Sub_dicionario para datas/mes, chamando a function\n # OrderedDict() que lembra a ordem de cada item\n datas_guardadas_mes = OrderedDict()\n \n # Lista com o nome dos meses\n meses_ano = [\n 'Janeiro',\n 'Fevereiro',\n 'Março',\n 'Abril',\n 'Maio',\n 'Junho',\n 'Julho',\n 'Agosto',\n 'Setembro',\n 'Outubro',\n 'Novembro',\n 'Dezembro'\n ]\n\n # Lista com o nome dos dias da semana\n dias_semana = [\n 'Domingo',\n 'Segunda-Feira',\n 'Terça-Feira',\n 'Quarta-Feira',\n 'Quinta-Feira',\n 'Sexta-Feira',\n 'Sábado',\n ]\n \n # lista de filtro dos dias desejados\n dias_desejados = [ 0, 2, 6]\n \n # lista contendo o numero do primeiro dia do mes\n # em relação ao numero total de dias no ano\n primeiro_dia_mes = [\n #Primeiro dia de cada mês\n 0, 31, 59, 90,\n 120, 151, 181, 212,\n 243, 273, 304, 334\n ]\n \n # lista de meses com 30 dias \n meses_trinta = [ 4, 6, 9, 11 ]\n \n # Esta variavel trará 31 dias para os não\n # estiverem a lista 'meses_trinta'\n numero_dias_mes = 31\n \n # Numero do dia atual\n numero_dia_ano = primeiro_dia_mes[self.mes -1] + self.dia\n \n # Cria variaveis para trabalhar com dia, mes, ano\n # e index para a lista 'dias_semana'\n dia_atual = self.dia\n mes_atual = self.mes\n ano_atual = self.ano\n sendo_dia = index_data_1\n # Variável para ano bissexto\n se_bissexto = False\n # Verifica se ano é bissexto\n if (ano_atual %4 == 0 and ano_atual %100 != 0):\n se_bissexto = True\n elif ano_atual %400 == 0:\n se_bissexto = True\n else:\n se_bissexto = False\n\n # Nome mes atual\n nome_mes_atual = ''\n \n # Inicia loop para filtrar dias\n for dia_passado in range(0, datas_percorrer + 1):\n\n #Da nome ao mes\n nome_mes_atual = meses_ano[mes_atual - 1]\n \n # Verifica se mes atual esta na lista meses_trinta\n # se true, o mes tem 30 dias\n if mes_atual in meses_trinta:\n numero_dias_mes = 30\n # Se o mes atual é = 2 (fevereiro), o mes possui 28 dias\n elif mes_atual == 2:\n numero_dias_mes = 28\n # Porem se for bissexto, o mes tem 29 dias.\n if se_bissexto == True:\n numero_dias_mes = 29\n else:\n numero_dias_mes = 31\n \n # Verifica se a data passa no filtro 'dias desejados'\n if sendo_dia in dias_desejados:\n # Concatena chave\n chave_dia_mes = str(dia_atual)\n #chave_dia_mes += '/' + str(mes_atual)\n # Concatena valor\n valor_semana = dias_semana[sendo_dia]\n # Guarda as datas no dicionario mes\n datas_guardadas_mes[chave_dia_mes] = valor_semana\n\n # Adiciona uma unidade no numero_do_dia\n # na data atual e no index do dia\n numero_dia_ano += 1\n dia_atual += 1\n sendo_dia += 
1\n # Cria ou adiciona o dicionario mes no dicionario de ano\n datas_guardadas_ano[nome_mes_atual] = datas_guardadas_mes\n # Cria ou adiciona o dicionario ano no dicionario geral\n datas_guardadas[ano_atual] = datas_guardadas_ano\n \n # Se o index após a adição for > 6, retorna 0\n if sendo_dia > 6:\n sendo_dia = 0\n \n # Se o dia atual for maior que o numero total\n # de dias do mes, retorna dia primeiro do mes seguinte\n if dia_atual > numero_dias_mes:\n dia_atual = 1\n mes_atual += 1\n datas_guardadas_mes = OrderedDict()\n # Se o mes > 12, retorna janeiro, primeiro do ano seguinte\n if mes_atual > 12:\n mes_atual = 1\n numero_dia_ano = 1\n ano_atual += 1\n datas_guardadas_ano = OrderedDict()\n # Verifica se ano seguinte é bissexto\n if (ano_atual %4 == 0 and ano_atual %100 != 0):\n se_bissexto = True\n elif ano_atual %400 == 0:\n se_bissexto = True\n else:\n se_bissexto = False\n \n return(datas_guardadas)", "def finTour(self):\n print(\"fin du tour\")\n self.etat = \"Fin\"\n if self.joueurActif.nbRessource + self.joueurActif.getNbRessourceTour() <= self.joueurActif.nbMaxRessource :\n self.joueurActif.nbRessource += self.joueurActif.getNbRessourceTour()\n else:\n self.joueurActif.nbRessource = self.joueurActif.nbMaxRessource\n print(self.joueurActif.nbRessource)\n if self.joueurActif == self.joueur1:\n self.joueurActif = self.joueur2\n print(\"Au joueur 2 de jouer\")\n else:\n self.joueurActif = self.joueur1\n print(\"Au joueur 1 de jouer\")\n for iEntite in self.joueurActif.entiteResetDeplacement:\n iEntite.setMoove(True)\n for iEntite in self.joueurActif.entiteResetCombat:\n iEntite.setCanAttack(True)\n \n if self.joueur1.nbRessource >= 2000:\n print(\"FIN DE LA PARTIE LE JOUEUR 1 A GAGNER\")\n if self.joueur2.nbRessource >= 2000:\n print(\"FIN DE LA PARTIE LE JOUEUR 2 A GAGNER\") \n \n self.etat = \"En jeu\"", "def ndemeye(self, message):\n\n try: activate(message.contact.language)\n except: activate('rw')\n\n try:\n message.reporter = Reporter.objects.filter(national_id = message.connection.contact.name )[0]\n except Exception, e:\n try: message.supervisor = Supervisor.objects.filter(email = message.connection.contact.name )[0]\n except Exception,e:\n message.respond(_(\"You need to be registered first\"))\n return True\n\n try:\n cnf = RegistrationConfirmation.objects.get(reporter = message.reporter)\n cnf.received = datetime.now()\n cnf.responded = True\n cnf.answer = True\n cnf.save()\n except Exception, e:\n print e\n if message.supervisor:\n message.respond(\"Muraho murakomeye! Ohereza ijambo 'WHO' urebeko wanditse neza, kandi wibutse abajyanamako bagomba kohereza ubutumwa kuri %s. Murakoze\" % settings.SHORTCODE) \n else: message.respond(_(\"You need to be registered first\"))\n return True \t\t\t \n\n message.respond(\"Muraho murakomeye! Mwatangira kohereza ubutumwa ku buzima bw'umubyeyi n'umwana kuri Rapidsms numero %s.\\\n Ohereza ijambo 'WHO' urebeko wanditse neza. 
Murakoze\" % settings.SHORTCODE)\n\n return True", "def inscricao(self):\n\n return True", "def controlList(user):\n insListe = Inserito.objects.filter(user=user, cancellato=False).select_related('listaAttesa')\n for l in insListe:\n numPosti = l.listaAttesa.corso.cap - l.listaAttesa.corso.posti_prenotati\n if numPosti > 0:\n testo = \"Si è liberato un posto per il corso \"+l.listaAttesa.corso.nome+ \" che si tiene il \"+ str(l.listaAttesa.corso.data) + \" alle \"+ str(l.listaAttesa.corso.ora_inizio)\n noreply = User.objects.get(username='noreply')\n notifica = Messaggio(userMittente=noreply, userDestinatario=user, data_ora=datetime.datetime.today(), text=testo)\n checkEx = Messaggio.objects.filter(userMittente=noreply, userDestinatario=user, text=testo).exists()\n if not checkEx:\n notifica.save()\n changeListRecord = Inserito.objects.get(user=user, listaAttesa=l.listaAttesa)\n changeListRecord.cancellato = True\n changeListRecord.save()", "def __call__(self, serv, author, args):\n if not self.bot.has_admin_rights(serv, author):\n return\n if len(args) > 1:\n liste = args[1].split(\"@\")[0]\n query = (\"SELECT id, subject, author, liste FROM moderation \" +\n \"WHERE liste=%s AND moderated=0 ORDER BY date DESC\")\n values = (liste,)\n message = (\"Messages en attente de modération \" +\n \"pour la liste \" + liste + \" :\")\n else:\n query = (\"SELECT id, subject, author, liste FROM moderation \" +\n \"WHERE moderated=0 ORDER BY date DESC\")\n values = ()\n message = \"Messages en attente de modération :\"\n try:\n bdd = self.bot.pgsql_connect(serv)\n assert(bdd is not None)\n except AssertionError:\n return\n\n bdd_cursor = bdd.cursor()\n bdd_cursor.execute(query, values)\n if bdd_cursor.rowcount <= 0:\n self.bot.ans(serv,\n author,\n \"Aucun message en attente de modération.\")\n return\n self.bot.ans(serv, author, message)\n for (ident, subject, author, liste) in bdd_cursor:\n self.bot.say(serv, \"[\" + liste + \"] : « \" + subject + \" » par \" +\n author)\n bdd_cursor.close()", "def seleccion(datos,multifasta,querys):\n\n #Hacemos una lista con los nombres de las querys que están en el archivo\n nombres_query=[]\n with open (querys,mode=\"r\") as f:\n for linea in f:\n if linea[0]==\">\":\n nombres_query.append(linea[1:len(linea)-1])\n f.close()\n\n #Obtenemos los nombres de las query y de los subject con los que ha hecho hit\n nombres2=datos[\"Nombre_subject\"]\n nombres1=datos[\"Nombre_query\"]\n nombres1=list(nombres1[1:])\n nombres2=list(nombres2[1:])\n \n seleccion={}#diccionario querys:hits blast\n #Parseamos las listas para obtener el nombre de la query como clave\n #y como valor una lista con los subjects con los que ha hecho hit\n for i in range(len(nombres1)): \n for x in range(len(nombres_query)):\n if nombres_query[x]==nombres1[i]:\n clave=nombres_query[x]\n valor=nombres2[i]\n if clave in seleccion:\n seleccion[clave].append(valor)\n else:\n seleccion[clave]=[valor]\n #Elimino valores duplicados en los valores\n for k, v in seleccion.items():\n nuevo=[]\n for item in v:\n if item not in nuevo:\n nuevo.append(item)\n seleccion[k] = nuevo\n\n #Contador para determinar si se encuentra en una linea con el nombre (>) o con la secuencia\n n=0\n #Contador para recorrer la lista con los nombres de las querys\n cuenta=0\n #Lista con los nombres de los archivos generados\n lista_nombres=[]\n for opciones in seleccion.items():\n abre_query=open(querys,\"r\")#Abrimos el archivo de las querys\n keys=seleccion.keys()#Generamos una lista con las keys del diccionario, que son las 
querys\n modifica=[]\n modifica1=[]\n modifica2=[]\n modifica3=[]\n\n nombre_archivo=opciones[0]\n with open (multifasta,mode=\"r\") as f:\n with open(nombre_archivo,\"w+\") as archivo: #El nombre de cada archivo será el nombre de su query\n #Forma una lista con todos los hits de blast\n modifica2=opciones[1]\n \n # Forma una lista con el nombre de cada una de las querys\n for x in abre_query: \n if x[0]==\">\":\n modifica1.append(x[1:len(x)-1])\n \n #En caso de que los hits que encuentra en blast no sean las query, las elimina\n eliminar=[item for item in modifica1 if item not in modifica2]\n for r in eliminar:\n modifica1.remove(r)\n \n #Nos quedamos solamente con los hits que encontró en blast, quitando las querys\n modifica3 = [item for item in modifica2 if item not in modifica1]\n modifica3.sort()\n \n #genera la lista con todos los hits, incluidas las query\n if len(modifica1)<=len(keys):\n modifica=modifica1+modifica3\n\n #Forma un archivo por cada query introducida, con los nombres y secuencias\n #que se obtuvieron en el blast\n for linea in f:\n if cuenta==(len(modifica)):\n break\n if linea[1:(len(linea)-1)]==modifica[cuenta]:\n archivo.write(linea)\n n+=1\n elif n==1 and linea[0]!=\">\":\n archivo.write(linea)\n cuenta+=1\n n=0\n else:\n n=0\n lista_nombres=lista_nombres+[nombre_archivo] \n archivo.close()\n n=0\n cuenta=0\n f.close()\n \n \n \n\n \n return lista_nombres", "def relacher_arc(modele,dist,pred,fleches,texte,v1,v2):\n \n if dist[v1] + modele.longueur(v1,v2) < dist[v2] :\n dist[v2] = dist[v1] + modele.longueur(v1,v2)\n pred[v2] = v1\n if v2 in fleches :\n modele.delFleche(fleches[v2])\n modele.deltexte(texte[v2])\n fleches[v2] = modele.addFleche(v1,v2,\"Gray\")\n texte[v2] = modele.addTexte(v2,dist[v2])\n modele.observateur.update()\n return True \n return False", "def non_plein(self):\n # pass\n # mon travail\n # si on trouve un espace dans le plateau , il n'est pas plein ==> true\n b = False\n for i in range(0, 3):\n for j in range(0, 3):\n if self.cases[(i, j)].contenu == \" \":\n b = True\n return b", "def listar(channel, where, ebrios):\n if ebrios[where]:\n response = 'Los que se coparon en ' + where + ':'\n for ebrio in ebrios[where]:\n response += '\\n' + vos_quien_sos(ebrio) + ''\n else:\n response = 'No hay after armado en ' + where + \\\n ', podrías armar uno <@' + user + '>'\n postea(channel, response)", "def recherche_article_nom(self,name) :\n if name == self.get_Nom() :\n return True\n return False", "def dodaj_novu_zs_ref_vrijednost(self):\r\n logging.info('Request za dodavanjem nove zero/span referentne vrijednosti')\r\n self.emit(QtCore.SIGNAL('dodaj_novu_referentnu_vrijednost'))", "def vendre(self, symbole, quantite, une_date=date.today()):\n\n if une_date > date.today():\n raise ErreurDate(\"La date est postérieure à la date d'aujourd'hui\")\n\n else:\n if symbole in self.portefeuille:\n quantite_titre = 0.0\n\n for les_jours in self.portefeuille[symbole]:\n if les_jours <= une_date:\n quantite_titre += self.portefeuille[symbole][les_jours]\n\n if quantite_titre < quantite:\n raise ErreurQuantité(\"Quantité insuffisante pour effectuer la vente\")\n\n else:\n if une_date in self.portefeuille[symbole]:\n self.portefeuille[symbole][une_date] -= float(quantite)\n\n elif une_date not in self.portefeuille[symbole]:\n self.portefeuille[symbole][une_date] = - float(quantite)\n\n cout = self.marche.prix(symbole, une_date) * quantite\n if une_date in self.argent:\n self.argent[une_date] += float(cout)\n\n elif une_date not in self.argent:\n 
self.argent[une_date] = float(cout)\n\n #Ca sert tu a de quoi ca ? Yes le chum\n else:\n raise ErreurQuantité(\"Le titre ne fait pas partie du portefeuille\")", "def afficher(dico):\n return dico", "def indexe_chercheur(ldapid, labo_accro, labhalid, idhal, idref, orcid): # self,\n # progress_recorder = ProgressRecorder(self)\n # progress_recorder.set_progress(0, 10, description='récupération des données LDAP')\n if mode == \"Prod\":\n server = Server(\"ldap.univ-tln.fr\", get_info=ALL)\n conn = Connection(\n server,\n \"cn=Sovisu,ou=sysaccount,dc=ldap-univ-tln,dc=fr\",\n config(\"ldappass\"),\n auto_bind=True,\n ) # recup des données ldap\n conn.search(\n \"dc=ldap-univ-tln,dc=fr\",\n \"(&(uid=\" + ldapid + \"))\",\n attributes=[\n \"displayName\",\n \"mail\",\n \"typeEmploi\",\n \"ustvstatus\",\n \"supannaffectation\",\n \"supanncodeentite\",\n \"supannEntiteAffectationPrincipale\",\n \"labo\",\n ],\n )\n dico = json.loads(conn.response_to_json())[\"entries\"][0]\n structid = config(\"structId\")\n else:\n dico = {\n \"attributes\": {\n \"displayName\": \"REYMOND David\",\n \"labo\": [],\n \"mail\": [\"david.reymond@univ-tln.fr\"],\n \"supannAffectation\": [\"IMSIC\", \"IUT TC\"],\n \"supannEntiteAffectationPrincipale\": \"IUTTCO\",\n \"supanncodeentite\": [],\n \"typeEmploi\": \"Enseignant Chercheur Titulaire\",\n \"ustvStatus\": [\"OFFI\"],\n },\n \"dn\": \"uid=dreymond,ou=Personnel,ou=people,dc=ldap-univ-tln,dc=fr\",\n }\n structid = \"198307662\"\n ldapid = \"dreymond\"\n labo = labhalid\n\n extrait = dico[\"dn\"].split(\"uid=\")[1].split(\",\")\n chercheur_type = extrait[1].replace(\"ou=\", \"\")\n suppan_id = extrait[0]\n if suppan_id != ldapid:\n print(\"aille\", ldapid, \" --> \", ldapid)\n nom = dico[\"attributes\"][\"displayName\"]\n emploi = dico[\"attributes\"][\"typeEmploi\"]\n mail = dico[\"attributes\"][\"mail\"]\n if \"supannAffectation\" in dico[\"attributes\"].keys():\n supann_affect = dico[\"attributes\"][\"supannAffectation\"]\n else:\n supann_affect = []\n\n if \"supannEntiteAffectationPrincipale\" in dico[\"attributes\"].keys():\n supann_princ = dico[\"attributes\"][\"supannEntiteAffectationPrincipale\"]\n else:\n supann_princ = []\n\n if not len(nom) > 0:\n nom = [\"\"]\n elif not len(emploi) > 0:\n emploi = [\"\"]\n elif not len(mail) > 0:\n mail = [\"\"]\n\n # name,type,function,mail,lab,supannAffectation,supannEntiteAffectationPrincipale,halId_s,labHalId,idRef,structDomain,firstName,lastName,aurehalId\n chercheur = dict()\n # as-t-on besoin des 3 derniers champs ???\n chercheur[\"name\"] = nom\n chercheur[\"type\"] = chercheur_type\n chercheur[\"function\"] = emploi\n chercheur[\"mail\"] = mail[0]\n chercheur[\"orcId\"] = orcid\n chercheur[\"lab\"] = labo_accro # acronyme\n chercheur[\"supannAffectation\"] = \";\".join(supann_affect)\n chercheur[\"supannEntiteAffectationPrincipale\"] = supann_princ\n chercheur[\"firstName\"] = chercheur[\"name\"].split(\" \")[1]\n chercheur[\"lastName\"] = chercheur[\"name\"].split(\" \")[0]\n\n # Chercheur[\"aurehalId\"]\n\n # creation des index\n # progress_recorder.set_progress(5, 10, description='creation des index')\n if not es.indices.exists(index=structid + \"-structures\"):\n es.indices.create(index=structid + \"-structures\")\n if not es.indices.exists(index=structid + \"-\" + labo + \"-researchers\"):\n es.indices.create(index=structid + \"-\" + labo + \"-researchers\")\n es.indices.create(\n index=structid + \"-\" + labo + \"-researchers-\" + ldapid + \"-documents\"\n ) # -researchers\" + row[\"ldapId\"] + 
\"-documents\n else:\n if not es.indices.exists(\n index=structid + \"-\" + labo + \"-researchers-\" + ldapid + \"-documents\"\n ):\n es.indices.create(\n index=structid + \"-\" + labo + \"-researchers-\" + ldapid + \"-documents\"\n ) # -researchers\" + row[\"ldapId\"] + \"-documents\" ?\n\n chercheur[\"structSirene\"] = structid\n chercheur[\"labHalId\"] = labo\n chercheur[\"validated\"] = False\n chercheur[\"ldapId\"] = ldapid\n chercheur[\"Created\"] = datetime.datetime.now().isoformat()\n\n # New step ?\n\n if idhal != \"\":\n aurehal = get_aurehalId(idhal)\n # integration contenus\n archives_ouvertes_data = get_concepts_and_keywords(aurehal)\n else: # sécurité, le code n'est pas censé être lancé par create car vérification du champ idhal\n return redirect(\"unknown\")\n # retourne sur check() ?\n\n chercheur[\"halId_s\"] = idhal\n chercheur[\"validated\"] = False\n chercheur[\"aurehalId\"] = aurehal # heu ?\n chercheur[\"concepts\"] = archives_ouvertes_data[\"concepts\"]\n chercheur[\"guidingKeywords\"] = []\n chercheur[\"idRef\"] = idref\n chercheur[\"axis\"] = labo_accro\n\n # Chercheur[\"mappings\"]: {\n # \"_default_\": {\n # \"_timestamp\": {\n # \"enabled\": \"true\",\n # \"store\": \"true\",\n # \"path\": \"plugins.time_stamp.string\",\n # \"format\": \"yyyy-MM-dd HH:m:ss\"\n # }\n # }}\n res = es.index(\n index=chercheur[\"structSirene\"] + \"-\" + chercheur[\"labHalId\"] + \"-researchers\",\n id=chercheur[\"ldapId\"],\n body=json.dumps(chercheur),\n refresh=\"wait_for\",\n )\n print(\"statut de la création d'index: \", res[\"result\"])\n return chercheur", "def mourir(self, adversaire=None, recompenser=True):\n try:\n self.script[\"meurt\"][\"avant\"].executer(pnj=self, salle=self.salle,\n adversaire=adversaire)\n except InterrompreCommande:\n Personnage.mourir(self, adversaire=adversaire, recompenser=recompenser)\n else:\n Personnage.mourir(self, adversaire=adversaire, recompenser=recompenser)\n self.script[\"meurt\"][\"apres\"].executer(pnj=self, salle=self.salle,\n adversaire=adversaire)\n cadavre = importeur.objet.creer_objet(importeur.objet.prototypes[\n \"cadavre\"])\n cadavre.pnj = self.prototype\n self.salle.objets_sol.ajouter(cadavre)\n\n importeur.hook[\"pnj:meurt\"].executer(self, adversaire)\n\n # Gain d'XP\n if adversaire and self.gain_xp and recompenser:\n xp = importeur.perso.gen_niveaux.grille_xp[self.niveau][1]\n xp = xp * self.gain_xp / 100\n adversaire.gagner_xp(\"combat\", xp)\n\n importeur.pnj.supprimer_PNJ(self.identifiant)", "def comsume_msg(self, msg_type):", "def search_clues(self):\r\n print(\"\\n************Searching Clues************\\n\")\r\n for word_id in self.words.keys():\r\n if not self.words[word_id].see and not self.words[word_id].wth:\r\n clue = pop_backslash(self.words[word_id].clue)\r\n temp = word_domain(\"allintext:\" + clue +' -crossword',self.words[word_id].length)\r\n temp2 = temp + word_domain(clue +' -crossword',self.words[word_id].length)\r\n domain = temp2 + data_muse(clue, self.words[word_id].length)\r\n unique_list = []\r\n for x in domain: \r\n y = x.upper()\r\n # check if exists in unique_list or not \r\n if y not in unique_list: \r\n unique_list.append(y) \r\n \r\n self.words[word_id].assign_word_domain(unique_list)\r\n print(\"\\nSearch is done...\")", "def recherche_article(self,reference) :\n if int(reference) == self.get_Ref():\n print(self)\n return True\n return False,print(\"No article existed\")", "def devolverDetalle(self):\n\n rowActual=self.tableFactura.currentItem().row()\n signal = 
QtGui.QMessageBox.information(self,\"Confirmación\",\"¿Desea devolver este item?\",\\\n QtGui.QMessageBox.Close | QtGui.QMessageBox.Ok)\n\n if signal == QtGui.QMessageBox.Ok:\n\n producto = int(self.tableFactura.item(rowActual,1).text())\n cantidad_detalle = int(self.tableFactura.item(rowActual,2).text())\n linea = int(self.tableFactura.item(rowActual,0).text())\n nro_factura = int(self.lineNumero.text())\n detalle = FacturaModel.getDetalle(nro_factura,linea,self.sesion)\n lotes_detalle = detalle.devolverLotes(self.sesion)\n temp = lotes_detalle\n\n finalize_actualizacion = False\n cantidad_restante = cantidad_detalle\n\n while not finalize_actualizacion:\n\n cantidad, ok = QtGui.QInputDialog.getInt(self,\"Cantidad\",\"Ingrese cantidad del producto\",1,1,2000,5)\n if ok == False:\n finalize_actualizacion = True\n self.tableFactura.item(rowActual,2).setText(str(cantidad_detalle))\n break\n lote, ok=QtGui.QInputDialog.getText(self,\"Lote\",\"Ingrese lote\")\n if ok == False:\n finalize_actualizacion = True\n self.tableFactura.item(rowActual,2).setText(str(cantidad_detalle))\n break\n if not lote in lotes_detalle.keys():\n QtGui.QMessageBox.information(self,\"Aviso\",\"El lote ingresado no es valido para este detalle\")\n elif lotes_detalle[str(lote)] == 0:\n QtGui.QMessageBox.information(self,\"Aviso\",\"Los productos de este lote ya han sido devueltos\")\n elif cantidad > lotes_detalle[str(lote)]:\n QtGui.QMessageBox.information(self,\"Aviso\",\"La cantidad ingresada es mayor a la esperada para este lote\")\n else:\n temp[str(lote)] -= cantidad\n cantidad_restante -= cantidad\n self.tableFactura.item(rowActual,2).setText(str(cantidad_restante))\n\n if sum(map(lambda x: temp[x],temp)) == 0:\n self.productosSeleccionados +=1\n key = int(self.tableFactura.item(rowActual,0).text())\n self.detallesDevueltos[key] = detalle\n self.armarItem(self.obtenerValoresItem(rowActual),cantidad_detalle,key)\n self.tableFactura.removeRow(rowActual)\n finalize_actualizacion = True", "def delete(self, usuario_actual, id_lista_de_reproduccion, id_cancion):\n error_no_existe_lista_reproduccion = ValidacionListaDeReproduccion. \\\n validar_no_existe_lista_de_reproduccion(id_lista_de_reproduccion)\n if error_no_existe_lista_reproduccion is not None:\n return error_no_existe_lista_reproduccion, 404\n error_no_es_dueno = ValidacionListaDeReproduccion. \\\n validar_usuario_es_dueno_de_lista_de_reproduccion(id_lista_de_reproduccion, usuario_actual.id_usuario)\n if error_no_es_dueno is not None:\n return error_no_es_dueno, 403\n error_cancion_no_en_lista = ValidacionListaDeReproduccion. \\\n validar_existe_cancion_en_lista_de_reproduccion(id_lista_de_reproduccion, id_cancion)\n if error_cancion_no_en_lista is not None:\n return error_cancion_no_en_lista, 404\n cancion = Cancion.obtener_cancion_por_id(id_cancion)\n lista_de_reproduccion = ListaDeReproduccion.obtener_lista_de_reproduccion(id_lista_de_reproduccion)\n lista_de_reproduccion.quitar_cancion(cancion)\n return cancion.obtener_json_con_album(), 202", "def nao_quer_beber(self, cliente):\n self.vazio.acquire()\n with self.lock:\n self.buff_n_quer.append(cliente)\n # libera lara um garcom retira-lo\n self.cheio.release()", "def rencontrer(self, joueur):\n print(\"Vous rencontrez le mythique \" + self._nom + \"!! 
Il détient peut-être un objet rare.\")\n input()", "def travailler_enveloppes(self, enveloppes):\n elements = enveloppes[\"l\"]\n elements.apercu = \"{valeur}\"\n elements.aide_courte = \\\n \"Entrez |ent|le nom d'un rang|ff| pour l'éditer ou :\\n\" \\\n \" |ent|/a <nom de l'élément à créer> / <probabilité> / <points> \" \\\n \"|ff|\\n (Exemple : |cmd|/a bras gauche / 8 / 3|ff|)\\n\" \\\n \" |ent|/s <nom de l'élément à supprimer>|ff|\\n\\n\" \\\n \"La probabilité de toucher un élément est calculée en \" \\\n \"fonciton\\nde la probabilité totale de tous les éléments.\\n\\n\" \\\n \"Éléments actuels de la cible :{valeur}\"", "def mostrar_mejores_disparos(self):\n participantes = self.__disparos.copy()\n mejores_disparos = self.__calcular_mejores_disparos(participantes)\n for mejor_disparo in mejores_disparos:\n print(\n f\"\"\"\n =================================\n ====== PARTICIPANTE Nº: {mejor_disparo['nroParticipante']} ======\n =================================\n Disparos: {mejor_disparo['disparos']},\n Nombre: {mejor_disparo['nombre']},\n Apellido: {mejor_disparo['apellido']},\n Mejor disparo: {mejor_disparo['mejor_disparo']}\n =================================\n =================================\n \"\"\"\n )", "def inutiliza_por_lote(self, notas, serie, justificativa): #ok\r\n # notas eh uma lista\r\n notas = notas # notas\r\n serie = str(serie) # serie\r\n if not justificativa:\r\n justificativa = \"Numeracao nao utilizada!\" \r\n\r\n for i in notas:\r\n nnf = str(i)\r\n #serie = \"0\"\r\n self.inutiliza_por_nota(nnf, serie, justificativa)", "def cellules(self): # itérateur rendu safe\n cellule_courante = self.tete\n while cellule_courante is not None:\n cellule_suivante = cellule_courante.suivant # sauvegarde\n yield cellule_courante\n cellule_courante = cellule_suivante # récupération de la sauvegarde", "def Vcontacts(\n # Selectors\n leftSelector='', rightSelector='',\n # Left side positive filters\n chainLeftIn='',resiNumLeftIn='',resiNameLeftIn='',atomSerialLeftIn='',\n atomNameLeftIn='',\n # Left side negative filters\n chainLeftOut='',resiNumLeftOut='',resiNameLeftOut='', atomSerialLeftOut='',\n atomNameLeftOut='',\n # Right side positive filters\n chainRightIn='',resiNumRightIn='',resiNameRightIn='',atomSerialRightIn='',\n atomNameRightIn='',\n # Right side negative filters\n chainRightOut='',resiNumRightOut='',resiNameRightOut='',atomSerialRightOut='',\n atomNameRightOut='',\n # Contact Area\n contactAreaMin='',contactAreaMax='',\n # Minimal distance\n minimalDistanceMin='',minimalDistanceMax='',\n # Sequence separation\n seqSeparationMin='',seqSeparationMax='',\n # Misc.\n model='', solvent='False', color='white', invert='False', opacity='1',\n # Server connection\n host='127.0.0.1', port='8888',\n # Debug mode\n debug='False'\n ):\n\n # Logger level\n logging_level = logging.INFO if not Bool(debug) else logging.DEBUG\n\n # Init logger\n logging.basicConfig(format='%(levelname)s:%(message)s', level=logging_level)\n\n # Loggin error wrapper\n logging.parser_error = CallCounter(logging.error)\n\n # Get model from selectors\n sele_model = get_selectors_model(leftSelector, rightSelector)\n\n if sele_model:\n model = sele_model\n else:\n model = get_model(model)\n\n params = params_parser(solvent, color, invert, opacity)\n\n if logging.parser_error.counter != 0:\n return\n\n # Append atom serials\n atomSerialLeftIn = atomSerialLeftIn + get_serials(leftSelector)\n atomSerialRightIn = atomSerialRightIn + get_serials(rightSelector)\n\n # Compose query commands\n Vfilter = 
compose(\n # Left side positive filters\n chainLeftIn, resiNumLeftIn, resiNameLeftIn, atomSerialLeftIn,\n atomNameLeftIn,\n # Left side negative filters\n chainLeftOut, resiNumLeftOut, resiNameLeftOut, atomSerialLeftOut,\n atomNameLeftOut,\n # Right side positive filters\n chainRightIn, resiNumRightIn, resiNameRightIn, atomSerialRightIn,\n atomNameRightIn,\n # Right side negative filters\n chainRightOut, resiNumRightOut, resiNameRightOut, atomSerialRightOut,\n atomNameRightOut,\n # Contact Area\n contactAreaMin, contactAreaMax,\n # Minimal distance\n minimalDistanceMin, minimalDistanceMax,\n # Sequence separation\n seqSeparationMin, seqSeparationMax\n )\n\n\n query = json.dumps({\n 'filter': Vfilter,\n 'params': params\n })\n\n try:\n # Create TCP client obj\n client = TCPClient(host, port)\n # Start TCP client\n client.start()\n except Exception as e:\n logging.critical(e)\n logging.info('Server might not be running')\n return\n\n try:\n # Check if server has PDB file\n if not client.check_file(model):\n client.send_file(model)\n\n cgo_path = client.get_cgo(model, query)\n\n except socket.timeout as e:\n logging.error(\"Connection time out.\")\n return\n except Exception as e:\n logging.error(\"Server side error\")\n return\n\n del client\n\n # draw CGOs\n draw_CGO(cgo_path)\n\n return", "def petite_partie(joueur1: object,\n joueur2: object,\n tableau_invisible_joueur1: list, tableau_invisible_joueur2: list\n ):\n print(\"plateau du joueur 2 : \\n\")\n\n tour_de_jeu(joueur1, joueur2, tableau_invisible_joueur2)\n\n rafraichir_position(joueur2, joueur2.porte_avion, joueur2.torpilleur, joueur2.croiseur)\n verif_bateau(joueur1, joueur2.porte_avion, joueur2.torpilleur, joueur2.croiseur)\n\n print(\"plateau du joueur 1 : \\n\")\n tour_de_jeu(joueur2, joueur1, tableau_invisible_joueur1)\n\n rafraichir_position(joueur1, joueur1.porte_avion, joueur1.torpilleur, joueur1.croiseur)\n verif_bateau(joueur2, joueur1.porte_avion, joueur1.torpilleur, joueur1.croiseur)", "def anotar_pedido(self, garcom):\n with self.lock_anotacao:\n # verifica se todos ja foram atendidos\n if self.tot_anotado < self.n_clientes:\n self.cheio.acquire()\n with self.lock:\n # verifica se o cliente adicionado bebera ou nao\n if len(self.buff_quer) == 1:\n cliente_atendido = self.buff_quer.popleft()\n else:\n cliente_atendido = self.buff_n_quer.popleft()\n\n # libera para um novo cliente ser adicionado\n self.vazio.release()\n self.tot_anotado += 1\n # retorna o cliente que foi atendido\n return cliente_atendido\n else:\n # retornando None o garcom sabera que todos da rod. 
foram atend\n return None", "def trouver_navires(navire):\n # On recherche d'abord le personnage\n equipage = navire.equipage\n vigies = equipage.get_matelots_au_poste(\"vigie\")\n if vigies:\n personnage = vigies[0]\n else:\n personnage = navire.personnages[0]\n\n portee = get_portee(personnage.salle)\n points = Visible.observer(personnage, portee, 5,\n {\"\": navire})\n navires = [p[1] for p in points.points.values() if \\\n isinstance(p[1], type(navire))]\n return navires", "def cantidad_participantes(self):\n print(\n f\"\"\"\n ================================================\n ======== SE ENCONTRARON {len(self.__disparos)} PARTICIPANTES ========\n ================================================\n \"\"\"\n )", "def post(self, request):\n form = DelEventoForm(request.POST)\n if form.is_valid():\n try:\n u = Evento.objects.get(id = form.cleaned_data['id'])\n correo = request.POST.get('correo', '')\n\n v = RegEvento.objects.all()\n \n\n\n for i in v:\n if(i.id_Evento == u.id and i.email_Usuario == correo):\n print(str(correo) + \"Elminado del evento\" + str(i.id_Evento))\n send_mail(\n 'Anulacion de invitacion',\n 'Has sido dado de baja del evento',\n 'pumaeventosunam@gmail.com',\n [i.email_Usuario],\n fail_silently=False,\n ) \n i.delete()\n\n \n except:\n print(\"no existe\") \n\n return render(request, self.template, self.context)", "def confirmacio_registre(data, sock, address, equips, dades_servidor):\n for equip in equips:\n if equip['nom'].__eq__(data[1:6]) and equip['mac'].__eq__(data[8:20]):\n if data[21:27].__eq__(equip['aleatori']) or not equip['estat'].__eq__('DISCONNECTED'):\n if equip['estat'].__eq__('DISCONNECTED'):\n equip['estat'] = 'REGISTERED'\n equip['address'] = address\n print_if_debug(DEBUG,\n 'Acceptat registre. Equip: nom=' + equip['nom'] + ', ip=' + address[0] + ', mac=' +\n equip['mac'] + ', alea=' + data[21:27])\n print_with_time('MSG. 
=> L\\'equip ' + data[1:6] + ' passa a estat ' + equip['estat'])\n enviar_reg_ack(sock, address, dades_servidor, equip)\n return True, equip\n elif not equip['address'].__eq__(address):\n enviar_reg_nack(sock, address, 'Discrepancies amb IP')\n return False, equip\n else:\n enviar_reg_ack(sock, address, dades_servidor, equip)\n return True, equip\n else:\n enviar_reg_nack(sock, address, 'Discrepancies amb el nombre aleatori')\n return False, equip\n\n enviar_reg_rej(sock, address, 'Equip no autoritzat en el sistema.')\n return False, None", "def entre_primeros_colas_recursos(self, recursos):\r\n\r\n entre_primeros = False\r\n recursos_manipuleo = [\r\n recursos[\"Estacion Volcadora\"],\r\n recursos[\"Estacion Tolva/Balanza 3\"],\r\n recursos[\"Pala Mecanica\"],\r\n recursos[\"Cuadrilla de Estibaje\"],\r\n recursos[\"Cabina de Recepcion - T1\"],\r\n recursos[\"Cabina de Despacho - T1\"],\r\n recursos[\"Cabina de Recepcion - T2\"],\r\n recursos[\"Cabina de Despacho - T2\"],\r\n recursos[\"Grua\"]]\r\n\r\n if any(self.entre_primeros_cola_recurso(recurso)\r\n for recurso in recursos_manipuleo):\r\n entre_primeros = True\r\n return entre_primeros", "async def çıkış(con):\r\n check=str(con.message.channel)\r\n if check == 'Direct Message with {}'.format(con.message.author.name):#COMMAND USED IN DM\r\n await bot.send_message(con.message.channel,\"**You must be in a `server voice channel` to use this command**\")\r\n\r\n if check != 'Direct Message with {}'.format(con.message.author.name):#COMMAND NOT IN DM\r\n \r\n # IF VOICE IS NOT CONNECTED\r\n if bot.is_voice_connected(con.message.server) == False:\r\n await bot.send_message(con.message.channel,\"**Bot kanala bağlanmamış !**\")\r\n\r\n # VOICE ALREADY CONNECTED\r\n if bot.is_voice_connected(con.message.server) == True:\r\n bot.loop.create_task(queue_songs(con,True))", "def DeleteComite(request,pk):#esta enlazado con la clase FaseForm del archivo getion/forms\n\n proyecto = User_Proyecto.objects.filter(proyecto_id=pk)\n gerente = User.objects.get(id=proyecto[0].user_id)\n print(gerente.username)\n\n proyecto_validar=Proyecto.objects.get(id_proyecto=pk)\n\n if validar_permiso(request.user, \"is_gerente\",proyecto_validar)==False: # primero se valida si es gerente en el proyecto actual)\n messages.error(request, 'No eres gerente de proyecto, por lo tanto no puedes eliminar el comite de cambio')\n return redirect('gestion:comite', pk)\n\n comite = Comite.objects.all()\n form = Usuario.objects.all()\n proyectos=Proyecto.objects.get(id_proyecto=pk)\n\n if request.method == 'POST': #preguntamos primero si la petición Http es POST ||| revienta todo con este\n some_var=request.POST.getlist('checkbox')\n\n if ((len(some_var)+1)%2==0 or (len(some_var)+1)==1):# SE VALIDA QUE DEBE DE SER IMPAR Y MAYOR A 1\n messages.error(request,'EL NUMERO DE USUARIOS EN EL COMITE DEBE DE SER IMPAR Y MAYOR A UNO')\n return redirect('gestion:DeleteComite',pk)\n\n for id in some_var:\n id_user =id\n usuario = User.objects.get(id=id_user)\n registrarAuditoriaProyecto(request.user, \"Desvinculo del comite de cambio al usuario: \" + str(usuario.username),\n proyectos.id_proyecto, proyectos.nombre, \"\")\n\n desvinculacionComite(request,pk,id_user)\n\n\n return redirect('gestion:comite',pk)\n else:\n list=[]\n if(comite != None):\n for i in range(form.count()):\n ok = False\n if form[i].esta_aprobado == True:\n for x in comite:\n if x.id_user == form[i].user.id and x.id_proyecto == pk:\n ok=True\n if ok:\n list.append(form[i].user.id)\n print(list)\n return render(request, 
'proyectos/delete_comite.html', {'form': form,'list':list,'pk':pk,'proyectos':proyectos,'idGerente':gerente.id})", "def acquisizioneParametri(self):\n\n messaggio =''\n\n try: \n self.__rete = slugify(self.ui.nomeRete.text())\n # controllo se la lunghezza del nome inserito sia > di 5 caratteri\n if(len(self.__rete) < 5 or len(self.__rete) > 30):\n\n messaggio = 'err: inserimento Nome'\n raise NameError\n \n # controllo che il nome scelto sia univoco\n isPresent = self.__NNNameCheck()\n if(isPresent):\n messaggio = 'err: nome già utilizzato'\n raise NameError\n\n # controlli su numero layer e numero nodi che siano >= 1\n # e che siano rispettivamente <= 20 e <= 50\n self.__layer = int(self.ui.nLayer.text())\n if(self.__layer < 1):\n messaggio = 'err: numero layer < 1'\n raise ValueError\n elif(self.__layer >= 20):\n messaggio = 'err: numero layer > 20'\n raise ValueError\n\n self.__nodi = int(self.ui.nNodi.text())\n if(self.__nodi < 1):\n messaggio = 'err: numero nodi < 1'\n raise ValueError\n if(self.__nodi >= 50):\n messaggio = 'err: numero nodi > 50'\n raise ValueError\n\n # salvataggio della funzione scelta\n self.__funzione = self.ui.funzione.currentText()\n \n # controllo che la percentuale di Vs sia < 25%\n # e che la percentuale di Ts sia > 75%\n if(self.__percentuale < 25):\n messaggio = 'err: suddivisione'\n raise ValueError\n if (self.__percentuale > 75):\n messaggio = 'err: suddivisione'\n raise ValueError\n\n # controllo che sia stato scelto effettivamente un dataset\n if(len(self.__dataSet) == 0):\n messaggio = 'err: dataSet errato'\n raise NameError\n\n # setto il tasto caricamento di una rete non cliccabile\n self.ui.but_caricaRete.setEnabled(False)\n\n # cambio nome del tasto convalida\n self.ui.but_convalida.setText('confermato')\n self.ui.comunicazione.setText('')\n #abilito salvataggio\n self.ui.but_salva.setEnabled(True)\n\n # settandola a True permetto che il training venga effettuato\n # dato che i dati inseriti sono validi\n self.__convalida = True\n return True\n except:\n # in caso di eccezzioni faccio comparire il messaggio\n self.ui.comunicazione.setText(messaggio)\n return False", "def subarrayOwnsMultipleCorrelators(subNo) :\n return not(s.subarrayOwnsSingleCorrelator(subNo) or s.subarrayOwnsNoCorrelator(subNo));", "def nao_tem_passageiros(self):\n return self.counter.ja_viajaram == self.counter.num_passageiros", "def on_autentificar(self, dni, nom, apellidos, telf, zona, nacion):\n condicion = True\n\n if dni == '':\n self.txtDni.set_placeholder_text(\"Inserte DNI\")\n condicion = False\n elif nom == '':\n self.txtNombre.set_placeholder_text(\"Inserte Nombre\")\n condicion = False\n elif apellidos == '':\n self.txtApellidos.set_placeholder_text(\"Inserte Apellidos\")\n condicion = False\n elif telf == '':\n self.txtTelf.set_placeholder_text(\"Inserte Telefono\")\n condicion = False\n elif zona == '':\n self.txtZona.set_placeholder_text(\"Inserte Zona\")\n condicion = False\n elif nacion == '':\n self.txtNacionalidad.set_placeholder_text(\"Inserte Nacionalidad\")\n condicion = False\n\n return condicion", "def filterRansac():\n pass", "def direita(self, msg, match):\n\n x, y, sentido = self.atualizar_sentido_do_jogador(\"direita\")\n yield \"Novo sentido: \" + sentido", "def check_to_Done(self,cr,uid,ids,context=None):\n\n for rec in self.browse(cr, uid, ids, context):\n if not rec.maintenance_id: \n raise osv.except_osv(_('ValidateError'), _(\"There Is NO maintenace request refrence to this accident.\"))\n return False\n return True", "def 
fill_dico_by_corr(colonne_parcourue_arr, corel_arr, dico, column_name):\n for col in range(0, len(column_name)):\n if colonne_parcourue_arr[col] == False:\n dico[col] = [column_name[col]]\n parcourir_colonne(col, colonne_parcourue_arr, corel_arr, dico, col, column_name)\n return dico", "def afficher_nom(self, message, color='black'):\n\n self.labNoms['foregrounnd'] = color\n self.labNoms['text'] = message\n\n self.afficher_nom(\"{} ({}) VS {} ({})\".format(self.p1.nom, self.p1.pion, self.p2.nom, self.p2.pion))", "def sucessor(self, no):\n if no is not None:\n if no.getDireito() is not None:\n return self.minimo(no.getDireito())\n else:\n pai = no.getPai()\n while pai is not None and no is pai.getDireito():\n no = pai\n pai = no.getPai()\n return pai", "def dryrecs():\n click.echo(\"Recommendations, not emailed: \")\n dio_dir: DioDir = DioDir()\n sched: ScheduleABC = DefaultSchedule()\n today: datetime.date = datetime.datetime.now().date()\n res: Optional[List[Person]] = get_recs(dio_dir, sched, today)\n next_day: datetime.date = sched.next_emailing_day(today)\n click.echo(recs_to_message(res, next_day))", "def grande_partie(joueur1: object, joueur2: object,\n tableau_invisible_joueur1: list, tableau_invisible_joueur2: list,\n ):\n print(\"plateau du joueur 2 : \\n\")\n\n tour_de_jeu(joueur1, joueur2, tableau_invisible_joueur2)\n\n rafraichir_position(joueur2, joueur2.porte_avion, joueur2.torpilleur, joueur2.croiseur, joueur2.canonniere,\n joueur2.destroyer)\n verif_bateau(joueur1, joueur2.porte_avion, joueur2.torpilleur, joueur2.croiseur, joueur2.canonniere,\n joueur2.destroyer)\n\n print(\"plateau du joueur 1 : \\n\")\n tour_de_jeu(joueur2, joueur1, tableau_invisible_joueur1)\n\n rafraichir_position(joueur1, joueur1.porte_avion, joueur1.torpilleur, joueur1.croiseur, joueur1.canonniere,\n joueur1.destroyer)\n verif_bateau(joueur2, joueur1.porte_avion, joueur1.torpilleur, joueur1.croiseur, joueur1.canonniere,\n joueur1.destroyer)", "def activer(self):\n if self.txt == self.msg: self.txt=\" \" #Efface juste le message de saisie...\n self.actif=True\n self.afficher()", "def contacts_list_update(self):\n\t\tself.database.contacts_clear()\n\t\tclient_log.debug(f'Запрос контакт листа для пользователся {self.name}')\n\t\treq = {\n\t\t\tACTION: GET_CONTACTS,\n\t\t\tTIME: time.time(),\n\t\t\tUSER: self.username\n\t\t}\n\t\tclient_log.debug(f'Сформирован запрос {req}')\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, req)\n\t\t\tans = get_message(self.transport)\n\t\tclient_log.debug(f'Получен ответ {ans}')\n\t\tif RESPONSE in ans and ans[RESPONSE] == 202:\n\t\t\tfor contact in ans[LIST_INFO]:\n\t\t\t\tself.database.add_contact(contact)\n\t\telse:\n\t\t\tclient_log.error('Не удалось обновить список контактов.')", "def composantesConnexes(modele):\n \n num_composantes = {}\n liste_sommets = modele.getListeSommets()\n compteur_comp = 1\n \n for x in liste_sommets :\n # si x n'a pas de numéro, lancer un parcours...\n if x not in num_composantes :\n attente = deque([x])\n num_composantes[x] = compteur_comp\n modele.addTexte(x,compteur_comp)\n while attente:\n courant = attente.pop()\n for vois in modele.getVoisins(courant):\n if not vois in num_composantes:\n attente.append(vois)\n num_composantes[vois] = compteur_comp\n modele.addTexte(vois,compteur_comp)\n \n compteur_comp += 1\n modele.observateur.update()", "def desactiver(self):\n self.actif=False\n if self.txt == \"\":self.txt=self.msg #Permet de remettre l instruction si la zone de saisir est vide...\n self.afficher()", "def 
positionTerminale(position):\n if(position['graines']['NORD']>=25 or position['graines']['SUD']>=25): \n return True \n for a in range(1,position['taille']+1):\n if(coupAutorise(position,a)):\n return False #on renvoie false dès qu'on obtient au moins un coup jouable\n return True", "def depassementTest(self):\n self.text = Texte(self.ecran, self.txt, self.police, self.taille, self.pos, self.couleurText) # Actualise\n self.textRect = self.text.textRend.get_rect() # Actualise\n self.dim = (self.textRect.w + 5, self.textRect.h + 5) # Actualise\n if self.pos[0]+self.dim[0] >= self.ecran.dim[0] or self.pos[0]+self.dim[0] >= self.ecran.dim[0]: # ou si chevauchement en parcourant la liste des widgets ...\n return True # Renvoie True -> il y a un depassement", "def fusionne(self, new):\n if new == self:\n raise ValueError(\"un titre ne peut être fusionné avec lui même\")\n self.alters_data = True\n if not isinstance(new, type(self)):\n raise TypeError(\"pas la même classe d'objet\")\n if self.type != new.type:\n raise TypeError(\"pas le même type de titre\")\n for cours in self.cours_set.all():\n try:\n if new.cours_set.get(date=cours.date).valeur != cours.valeur:\n raise Gsb_exc(\n \"attention les titre %s et %s ne peuvent etre fusionné à cause histo de cours\" % (self, new))\n except Cours.DoesNotExist:\n new.cours_set.create(date=cours.date, valeur=cours.valeur)\n nb_change = 0\n nb_change += Ope_titre.objects.filter(titre=self).update(titre=new)\n # on doit aussi reaffecter le tiers associe\n self.tiers.fusionne(new.tiers, ok_titre=True)\n self.delete()\n return nb_change", "def relacionarItem(request,id_proyecto,id_item):\n try:\n proyecto=Proyecto.objects.get(id_proyecto=id_proyecto)#se obtiene el proyecto\n fases=Fase.objects.filter(id_Proyecto=proyecto).order_by('id_Fase')#se obtienen las fases del proyecto\n itemActual=Item.objects.get(id_item=id_item)\n list=[]\n list=lista_items_relacion(itemActual,fases,id_proyecto,id_item)\n items = Item.objects.filter(actual=True)\n except:\n\n return HttpResponse(request, \"id de TI invalida\",status=400)\n\n\n if validar_permiso(request.user,\"is_gerente\",proyecto) or request.user.has_perm('crear_item',proyecto) and validar_rol_fase('crear_item',itemActual.fase,request.user):\n print('tiene el permiso de crear_item')\n else:\n messages.error(request,\"NO SE POSEE EL PERMISO: crear_item\" + \" SOLICITE EL PERMISO CORRESPONDINTE PARA REALIZAR LA ACCION\")\n return redirect('gestion:detallesFase', atributos.ti.fase.id_Fase)\n\n if request.method == 'POST': #preguntamos primero si la petición Http es POST ||| revienta todo con este\n some_var=request.POST.getlist('checkbox')\n #print(some_var)\n lis=[]\n proyecto = Proyecto.objects.get(id_proyecto=id_proyecto)\n fases = Fase.objects.filter(id_Proyecto=proyecto).order_by('id_Fase')\n item = Item.objects.filter(id_item=id_item)\n # hacer una funcion\n # [1][2][3]\n # [3][2][1]\n #VERIFICAR SI ES DE LA PRIMERA FASE SIN RELACIONES, SINO MOSTRAR ERROR\n if(lis==some_var):\n print(fases[0].id_Fase)\n print(item[0].fase.id_Fase)\n if fases[0].id_Fase!= item[0].fase.id_Fase:#sino es igual a la primera fase muestra error\n context = {\n \"mensaje\": \"EL ITEM NO ES DE LA PRIMERA FASE, POR ENDE DEBE DE CONTAR CON RELACION Y TENER DE FORMA DIRECTA O INDIRECTA RELACION CON LA PRIMERA FASE DEL PROYECTO \",\n \"titulo\": \"ITEM SIN RELACION\",\n \"titulo_b1\": \"AÑADE RELACION\",\n \"boton1\": \"/relacionarItem/\" + str(id_proyecto)+\"/\"+str(id_item),\n \"titulo_b2\": \"CANCELAR ITEM\",\n \"boton2\": 
\"/itemCancelado/\",\n }\n return render(request, 'Error.html', context)\n\n if fases[0].id_Fase!= item[0].fase.id_Fase:#sino es igual a la primera fase muestra error\n #VERIFICAR SI TIENE RELACION CON LA F1\n if(primeraFase(id_proyecto, id_item, some_var)==True):\n context = {\n \"mensaje\": \"EL ITEM NO TIENE RELACION CON LA PRIMERA FASE POR ENDE NO ES VALIDO, FAVOR VOLVER A REALIZAR RELACIONES Y VOLVER CONSISTENTE EL ITEM \",\n \"titulo\": \"ITEM SIN RELACION CON LA FASE 1\",\n \"titulo_b1\": \"VOLVER A AÑADIR RELACION\",\n \"boton1\": \"/relacionarItem/\" + str(id_proyecto)+\"/\" + str(id_item),\n \"titulo_b2\": \"CANCELAR ITEM\",\n \"boton2\": \"/itemCancelado/\",\n }\n return render(request, 'Error.html', context)\n\n #VERIFICAR SI SE GENERAN CICLOS--------- INCONSISTENCIAS\n\n registrarAuditoriaProyecto(request.user,'creo el item: '+str(item[0].nombre),id_proyecto,proyecto.nombre,item[0].fase.nombre)\n\n for id in some_var:###### SE GUARDAN LAS RELACIONES\n itemSeleccionado=Item.objects.get(id_item=id)\n if(itemSeleccionado.fase.id_Fase > itemActual.fase.id_Fase):#si el item es sucesor, sera apuntado por el item creado\n p = Relacion(fin_item=id,inicio_item=id_item)\n p.save()\n print(itemActual.nombre,\" --> \",itemSeleccionado.nombre)\n else:# sino es el sucesor, seguira siendo apuntado por los seleccionados\n p = Relacion(fin_item=id_item,inicio_item=id)\n p.save()\n print(itemActual.nombre,\" <-- \",itemSeleccionado.nombre)\n\n #----------------------------------------------------------#\n ## se puede volver generico si se restringe preguntando si el item es igual al ultimo\n version=Versiones(id_Version=1,id_item=id_item,id_padre=id_item)#SE GUARDA LA VERSION\n version.save()\n #----------------------------------------------------------#\n\n return redirect('gestion:detallesFase',item[0].fase.id_Fase)\n else:\n return render(request, 'items/relacionarItem.html', {'form': items,'list':list,'itemActual':itemActual})", "def abrir(self):\n assert self.open == False\n self.ne = [n for n in self.ne]\n self.je = [e1 for e1 in self.je]\n self.ie = []\n self.open = True", "def cancela_nfce(self, nnf, serie, chave_acesso, protocolo, justificativa=None):\r\n if not justificativa and nnf:\r\n justificativa = \"Problema na Impressao\"\r\n\r\n self.dll.tCFCancelar_NFCe_Daruma.argstypes = [c_char_p * 5]\r\n status = self.dll.tCFCancelar_NFCe_Daruma(nnf, serie, chave_acesso, protocolo, justificativa)\r\n\tif status !=1:\r\n if status == 0:\r\n\t\traise Exception(\"0: Erro, nao foi possivel comunicar \" + \r\n\t\t \"com a impressora nao fiscal.\")\r\n\t if status == -1:\r\n\t\traise Exception(\"-1: Cancelamento nao autorizado.\")\r\n\t elif status == -2:\r\n\t\traise Exception(\"-2: Chave invalida.\")\r\n\t elif status == -3:\r\n\t\traise Exception(\"-3: Falha no esquema XML.\")\r\n\t elif status == -4:\r\n\t\traise Exception(\"-4: XML fora do padrao.\")\r\n\t elif status == -5:\r\n\t\traise Exception(\"-5: Erro generico.\")\r\n\t elif status == -8:\r\n\t\traise Exception(\"-8: Usuario nao autorizado.\")\r\n\t elif status == -9:\r\n\t\traise Exception(\"-9: Usuario nao licenciado.\")\r\n\t elif status == -10:\r\n\t\traise Exception(\"-10: Documento e ambiente nao identificados.\")\r\n\t elif status == -13:\r\n\t\traise Exception(\"-13: Tipo de documento nao identificado.\")\r\n\t elif status == -14:\r\n\t\traise Exception(\"-14: Erro retornado pelo Web Service.\")\r\n\t elif status == -52:\r\n\t\traise Exception(\"-52: Erro ao gravar em arquivo temporario.\")\r\n\t elif status == 
-99:\r\n\t\traise Exception(\"-99: Parametros invalidos ou ponteiro nulo.\")\r\n\t elif status == -103:\r\n\t\traise Exception(\"-103: Nao foram encontradas as DLLs auxiliares.\")\r\n\t else:\r\n\t\traise Exception(\"Erro ao executar metodo de cancelamento.\")\r\n\treturn {\"status\": status}", "def remonter_membre(self, nom_membre):\n membre = self.get_membre(nom_membre)\n indice = self.__membres.index(membre)\n if indice != 0: # ne fait rien si le membre est déjà tout en haut\n membre = self.__membres.pop(indice)\n self.__membres.insert(indice - 1, membre)", "def get_nom_complet(self, ids, name):\n\tres = {}\n\tfor persona in self.browse(ids):\n if persona.cognom2:\n cognoms = '%s %s' % (persona.cognom1,\n persona.cognom2)\n else:\n cognoms = persona.cognom1\n res[persona.id] = '%s, %s' % (cognoms, persona.nom)\n\treturn res", "def test_partie(joueur1: object,\n joueur2: object,\n tableau_invisible_joueur1: list, tableau_invisible_joueur2: list\n ):\n print(\"plateau du joueur 2 : \\n\")\n\n tour_de_jeu(joueur1, joueur2, tableau_invisible_joueur2)\n\n rafraichir_position(joueur2, joueur2.porte_avion)\n verif_bateau(joueur1, joueur2.porte_avion)\n\n print(\"plateau du joueur 1 : \\n\")\n tour_de_jeu(joueur2, joueur1, tableau_invisible_joueur1)\n\n rafraichir_position(joueur1, joueur1.porte_avion)\n verif_bateau(joueur2, joueur1.porte_avion)" ]
[ "0.59579074", "0.58650804", "0.56254125", "0.54683447", "0.54610604", "0.5308732", "0.5250842", "0.5229191", "0.5190432", "0.5136807", "0.50936365", "0.50733256", "0.5067113", "0.50111264", "0.49850872", "0.49645695", "0.49620542", "0.49505514", "0.49489334", "0.4907525", "0.49039817", "0.4901957", "0.4889558", "0.48758096", "0.4873744", "0.48622796", "0.48618773", "0.48295382", "0.48232335", "0.48038653", "0.47750095", "0.4758664", "0.47561038", "0.47547004", "0.4745154", "0.47334364", "0.47220743", "0.47065327", "0.4705319", "0.4704497", "0.47032523", "0.47030735", "0.46932447", "0.4691927", "0.4686482", "0.4679341", "0.46685672", "0.46642303", "0.4653622", "0.46474564", "0.46459016", "0.46425095", "0.463822", "0.462921", "0.46291596", "0.4621439", "0.46200532", "0.46100283", "0.46097767", "0.46090844", "0.46048325", "0.45902318", "0.45883146", "0.45853302", "0.45762607", "0.4571889", "0.45715547", "0.45674425", "0.4562967", "0.4554443", "0.4545483", "0.45308873", "0.45283398", "0.4522341", "0.4517922", "0.45137918", "0.4513391", "0.4511722", "0.45047444", "0.44963583", "0.4496166", "0.44901612", "0.44828954", "0.4482687", "0.4481643", "0.44748065", "0.4472586", "0.4471706", "0.4470472", "0.44685435", "0.4468456", "0.44623628", "0.44592622", "0.44575316", "0.44567367", "0.44560254", "0.44556063", "0.44475925", "0.44357347", "0.4435251" ]
0.48496613
27
For a disk index k, the family of lists of indices, and the index of disk k within the family L, return the list / set of disk indices whose speed will be modified by k
def influence(k,L,n):
    try:
        to_check = L[n-1] # set of indices
        contact_direct=C(k,0)
        return list(to_check.intersection(contact_direct))
    except:
        return []
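A minimal usage sketch, assuming the influence definition above and a hypothetical stand-in for the external helper C(k, 0), which the function calls but does not define; the contact mapping and index sets below are illustrative only.

def C(k, _t):
    # Hypothetical stand-in: the real C(k, 0) is assumed to return the set
    # of disk indices in direct contact with disk k.
    contacts = {0: {1, 2}, 1: {0}, 2: {0, 3}, 3: {2}}
    return contacts.get(k, set())

L = [{1, 2, 3}, {0, 2}]    # family of index sets
print(influence(0, L, 1))  # intersection of L[0] with C(0, 0) -> [1, 2]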
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_k_indices(self, ks):\n if self.staticneighs:\n idx_ks = ks\n else:\n idx_ks = [self.ks.index(e) for e in ks]\n return idx_ks", "def generate_L(data_set, k, min_support):\n fptree = FPtree.fptree(data_set, min_support)\n print(\"pre:\",datetime.datetime.now())\n fptree.getPretable()\n print(\"pre:\",datetime.datetime.now())\n fptree.getRootTree()\n support_data = {}\n\n # L1,L2,support_L1 = adjacencyMatrix(data_set,min_support,support_data)\n # pretable = sorted(support_L1.items(), key=itemgetter(1, 0), reverse=True)\n # fptree = FPtree.fptree(data_set, min_support,pretable)\n # fptree.getRootTree()\n # Lksub1 = L2.copy()\n C1 = create_C1(data_set)\n # print (C1)\n # print (\"=====================\")\n L1 = generate_Lk_by_Ck(fptree,data_set, C1,1, min_support, support_data)\n Lksub1 = L1.copy()\n L = []\n L.append(Lksub1)\n for i in range(2, k+1):\n Ci = create_Ck(Lksub1, i)\n # print (Ci)\n # print (\"=====================\")\n Li = generate_Lk_by_Ck(fptree, data_set, Ci, i, min_support, support_data)\n Lksub1 = Li.copy()\n L.append(Lksub1)\n return L, support_data", "def k_rank_approximate(doc_matrix, k):\n return []", "def findknn(xTr,xTe,k):\n\n # YOUR CODE HERE\n if k > len(xTr):\n k = len(xTr)\n \n D=l2distance(xTe, xTr)\n (m,n) = D.shape\n \n indices = []\n dists = []\n for i in range(m):\n smallest_indices = np.argsort(D[i])\n ind = smallest_indices[:k]\n dis = D[i,smallest_indices[:k]]\n indices.append(ind)\n dists.append(dis)\n \n indices = np.transpose(np.array(indices))\n dists = np.transpose(np.array(dists))\n return indices, dists", "def combinarink(list, k):\n global visited\n global indexes\n visited = [0 for x in range(0, len(list) + 1)] # init with 0\n indexes = [x for x in range(0, len(list) + 1)] # init indexes with 0...n-1\n output = combinari(1, len(list), k, list, [])\n print (output)", "def _get_neighs_list_dynamic(self, k_is=[0]):\n neighs = [self.idxs[k_i] for k_i in k_is]\n return neighs", "def gkm_rc_indices(l=4, k=3):\n names = gkm_name(l=l, k=k, rev_comp=False)\n collect_seqs = set() # Contains the seqs and rev comps added thus far\n first_index, second_index = [], []\n for i, kmer in enumerate(names):\n if kmer not in collect_seqs:\n collect_seqs.add(kmer) # Add kmer and its RC so we don't process it again\n collect_seqs.add(reverse_complement(kmer))\n first_index.append(i) # Add the pair indices\n second_index.append(names.index(reverse_complement(kmer)))\n assert len(first_index) == len(second_index)\n return np.vstack((first_index, second_index)).astype(int)", "def _set_neighs_list_list_list(self, key):\n self.ks = list(range(len(key))) if self.ks is None else self.ks\n if self._constant_neighs:\n self.idxs = np.array(key)\n else:\n self.idxs = key\n if len(self.idxs[0]) != len(self.iss):\n self.iss = list(range(len(self.idxs[0])))\n if self.staticneighs:\n self.idxs = self.idxs[0]\n self._setted = True", "def k(self):\n self.kTable()", "def indices_hkl(self, H, K, L):\n from cctbx import miller\n _symm_equiv = miller.sym_equiv_indices(self.sg, (H, K, L))\n _indices = sorted([i.h() for i in _symm_equiv.indices()],\n reverse=True)\n if len(_indices) < _symm_equiv.multiplicity(False):\n _indices = _indices + [(-hh, -kk, -ll)\n for (hh, kk, ll) in _indices]\n return _indices", "def _set_neighs_list_list(self, key):\n if self._constant_neighs:\n key = np.array(key)\n if self.staticneighs:\n self.idxs = key\n self.ks = range(1) if self.ks is None else self.ks\n else:\n self.ks = range(1) if self.ks is None else self.ks\n len_ks = len(self.ks)\n 
self.idxs = [key for k in range(len_ks)]\n if type(key) == np.ndarray:\n self.idxs = np.array(self.idxs)\n if len(self.iss) != len(key):\n if len(self.iss) != len(key):\n self.iss = range(len(key))\n# if len(self.idxs[0]) > 0:\n# self.iss = list(range(len(self.idxs)))\n self._setted = True", "def print_idxlist_to_textlists(self, idx_list, worst=True, k=None, devData=None, y_pred=None, \\\n print_window=True, dataClass=None, return_indices=False): \n print (\"indices counts =\", idx_list.shape[0])\n boo = \"worst\" if worst else \"best\"\n print (\"ranked by {} cross-entropy loss\".format(boo))\n \n idx_list = [idx for (idx,ce) in self.rank_predictions(idx_selected=idx_list, worst=worst) ]\n ce_list = [ce for (idx,ce) in self.rank_predictions(idx_selected=idx_list, worst=worst) ]\n if k is not None:\n print (\"top {} results\".format(k))\n idx_list = idx_list[:k]\n ce_list = ce_list[:k] \n \n devData = (self.devX, self.devX_pos, self.devX_capitals, self.devY) if (devData is None) else devData\n y_pred = self.y_pred if (y_pred is None) else y_pred\n dataClass = self.dataClass if (dataClass is None) else dataClass\n \n word_windows = list(map(dataClass.vocab.ids_to_words, devData[0][idx_list]))\n pos_windows = list(map(dataClass.posTags.ids_to_words, devData[1][idx_list]))\n capital_windows = list(map(dataClass.capitalTags.ids_to_words, devData[2][idx_list])) \n gold_ner_class = [dataClass.nerTags.ids_to_words([tag]) for tag in devData[3][idx_list]]\n pred_ner_class = [dataClass.nerTags.ids_to_words([tag]) for tag in y_pred[idx_list]] \n\n if word_windows:\n cen = len(word_windows[0])//2 \n for i in range(len(word_windows)):\n print (\"\\nID {}\".format(idx_list[i]))\n print (\"KL divergence {}\".format(ce_list[i]))\n print (\"FEATURES: \\\"{}\\\", {}, {}\".format(word_windows[i][cen], pos_windows[i][cen], \\\n capital_windows[i][cen]))\n print (\"Gold NER {}\".format(gold_ner_class[i]))\n print (\"Pred NER {}\".format(pred_ner_class[i]))\n if print_window:\n print (\"Text window {}\".format(word_windows[i]))\n print (\"PoS window {}\".format(pos_windows[i]))\n print (\"Caps window {}\".format(capital_windows[i]))\n else:\n print (\"empty -- no predictions were made\")\n\n if return_indices:\n return idx_list", "def _get_neighs_list_static(self, k_is=[0]):\n neighs = [self.idxs for k_i in k_is]\n return neighs", "def build_k_indices(num_row, k_fold, seed):\n #num_row = y.shape[0]\n interval = int(num_row / k_fold)\n np.random.seed(seed)\n indices = np.random.permutation(num_row)\n k_indices = [indices[k * interval: (k + 1) * interval]\n for k in range(k_fold)]\n return np.array(k_indices)", "def find_nearest_neighbors_idx(X, x, k):\n ## homework:start\n result = \n ## homework:end\n return result", "def __getitem__(self, k) :\n raise NotImplementedError", "def clustering_and_visulization(self):\n centroids, _ = kmeans(self.data_mat, self.k)\n idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[idx == i, 0])\n self.plot_list1.append(self.data_mat[idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n for i in range(self.k):\n self.cluster = self.data_mat[idx == i]\n self.clusterlist.append(self.cluster)\n\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n\n self.indexdict = {}\n for i in self.clusterdict:\n self.indexdict[i] = []\n 
print(len(self.clusterdict))\n for i in range(len(idx)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n self.indexdict[j].append(i)\n print(\"cluster dict of packs\",self.indexdict)\n\n self.drugdict = {}\n for i in self.clusterdict:\n self.drugdict[i] = []\n self.drug=[]\n for i in range(len(self.indexdict.keys())):\n for j in range(len(self.indexdict[i])):\n self.drugdict[i].append(self.df.iloc[self.indexdict[i][j]].to_dict())\n print(\"drugs dict with their frequencies\",self.drugdict)\n clusterdict_from_df_as_drug_non_O_frequency = {}\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs ={}\n for i in self.drugdict:\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n for i in self.drugdict:\n for j in self.drugdict[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i]=list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n try:\n common_drug_list = [x for x in clusterdict_of_non_repeated_drugs[0] if x in clusterdict_of_non_repeated_drugs[1]]\n print('\\n')\n print(\"common drug list\", common_drug_list)\n total_frequency_of_drugs_dict = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict[i] = []\n\n for drug in common_drug_list:\n\n for cluster_keys in clusterdict_from_df_as_drug_non_O_frequency.keys():\n temp_list = []\n for cluster_values_as_list in clusterdict_from_df_as_drug_non_O_frequency[cluster_keys]:\n try:\n temp_list.append(cluster_values_as_list[str(drug)])\n except KeyError:\n print(\"\\t\")\n total_frequency_of_drugs_dict[cluster_keys].append(np.sum(temp_list))\n print(\"total drugs frequency\",total_frequency_of_drugs_dict)\n total_frequency_of_drugs_dict_with_drugs = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[i] = []\n temp_list1 = []\n temp_list2 = []\n for keys in self.drugdict.keys():\n temp_list1.append(clusterdict_of_non_repeated_drugs[keys])\n for keys in self.drugdict.keys():\n temp_list2.append(total_frequency_of_drugs_dict[keys])\n temp_list3 = []\n for i in temp_list1:\n for j in temp_list2:\n temp_list3.append(dict(zip(i,j)))\n temp_list4 = temp_list3[:2]\n print('\\n')\n for keys in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[keys].append(temp_list4[keys])\n print(\"total frequency with drugs dict\",total_frequency_of_drugs_dict_with_drugs)\n\n final_drugs_in_clusters_dict = {}\n for i in self.drugdict:\n final_drugs_in_clusters_dict[i] = []\n compare_list = []\n for drug in common_drug_list:\n compare_list.append(min(total_frequency_of_drugs_dict_with_drugs[0][0][drug], total_frequency_of_drugs_dict_with_drugs[1][0][drug]))\n print(\"compare list\",compare_list)\n for values in 
total_frequency_of_drugs_dict_with_drugs.values():\n for key1, value1 in values[0].items():\n if value1 in compare_list:\n\n key2 =values[0].keys()[values[0].values().index(value1)]\n values[0].pop(key2, None)\n\n\n print('final dict with deleted keys', total_frequency_of_drugs_dict_with_drugs)\n\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in total_frequency_of_drugs_dict_with_drugs[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n print(\"only drugs\",clusterdict_of_non_repeated_drugs)\n\n final_robot_packs_dict = {}\n for i in self.drugdict:\n final_robot_packs_dict[i] = []\n\n winner_drug_dict = {}\n for i in common_drug_list:\n winner_drug_dict[i] = []\n for drug in common_drug_list:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n winner_drug_dict[str(drug)].append(0)\n if drug in clusterdict_of_non_repeated_drugs[1]:\n winner_drug_dict[str(drug)].append(1)\n print(\"winner drug dict\",winner_drug_dict)\n\n for i in self.indexdict:\n print(i)\n for pack in self.indexdict[i]:\n packdict = self.df.iloc[pack].to_dict()\n packdict_non_0 = {x: y for x, y in packdict.items() if y != 0}\n packdict_non_0_key = packdict_non_0.keys()\n for drug in packdict_non_0_key:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n final_robot_packs_dict[0].append(pack)\n elif drug in clusterdict_of_non_repeated_drugs[1]:\n final_robot_packs_dict[1].append(pack)\n\n final_robot_packs_dict[i].append(pack)\n for commondrugs in winner_drug_dict:\n for winnercluster in winner_drug_dict[commondrugs]:\n if winnercluster==0:\n loosercluster =1\n if winnercluster == 1:\n loosercluster = 0\n if commondrugs in packdict_non_0_key and i==loosercluster:\n try:\n final_robot_packs_dict[i].remove(pack)\n final_robot_packs_dict[winnercluster].append(pack)\n except ValueError:\n print('\\t')\n\n for i in self.indexdict:\n final_robot_packs_dict[i] = set(final_robot_packs_dict[i])\n\n print(\"final which pack which robot dict\",final_robot_packs_dict)\n\n except IndexError:\n print(\"No common drugs\")", "def _set_neighs_number(self, key):\n if self.staticneighs:\n self.idxs = np.array([[key]]*len(self.iss))\n else:\n if self.ks is None:\n self.ks = range(1)\n len_ks = len(self.ks)\n self.idxs = np.array([[[key]]*len(self.iss)]*len_ks)\n self._constant_neighs = True\n self._setted = True", "def get_sparse_knn_graph(df, k, algorithm):\n print(\"df shape\", df.shape)\n X = np.array(df)\n nbrs = NearestNeighbors(n_neighbors=k, algorithm=algorithm).fit(X)\n distances, indices = nbrs.kneighbors(X)\n knn_graph = nbrs.kneighbors_graph(X).toarray()\n print(\"indices, graph\", len(indices), knn_graph.shape)\n return indices, knn_graph", "def build_index(dataset, n_neighbors):\n# Initialize FLANN\n pyflann.set_distance_type(distance_type='euclidean')\n flann = pyflann.FLANN()\n params = flann.build_index(dataset,algorithm='kdtree',trees=4)\n #print params\n nearest_neighbors, dists = flann.nn_index(dataset, n_neighbors, checks=params['checks'])\n return nearest_neighbors, dists", "def tril_indices(n,k=0):\r\n return mask_indices(n,tril,k)", "def 
clustering_and_visulization(self):\n try:\n centroids, _ = kmeans(self.data_mat, self.k)\n except ValueError:\n print(\"The number of clusters is more than the data points\")\n self.idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[self.idx == i, 0])\n self.plot_list1.append(self.data_mat[self.idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n\n for i in range(self.k):\n self.cluster = self.data_mat[self.idx == i]\n self.clusterlist.append(self.cluster)\n print(self.clusterlist)\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n index_dict ={}\n for i in self.clusterdict:\n index_dict[i] = []\n for i in range(len(self.data_mat)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n index_dict[j].append(i)\n print(\"drugs cluster dict\", index_dict)\n\n self.drugsdict = {}\n for i in index_dict:\n self.drugsdict[i] = []\n drugslist = list(self.df.columns.values)\n print(\"drugs list from dataframe\", drugslist)\n\n for i in index_dict:\n self.drugsdict[i] = [drugslist[index] for index in index_dict[i]]\n\n print(\"drugs cluster dict\", self.drugsdict)\n########################################################################################################################\n clusterdict_from_df_as_drug_frequency = {}\n clusterdict_from_df_as_drug_non_O_frequency = {}\n\n print('\\n')\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_frequency[i] = []\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_frequency[i].append(self.df.iloc[i].to_dict()) #\n print(\"packs in dict form of drugs frequency\", clusterdict_from_df_as_drug_frequency)\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n\n for i in range(len(self.df)):\n for j in clusterdict_from_df_as_drug_frequency[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n for i in range(len(self.df)):\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in range(len(self.df)):\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in range(len(self.df)):\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse(\n [list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n robot_for_packs_dict = {}\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = []\n\n # for i in range(len(self.df)):\n for i in range(len(self.df)):\n for j in clusterdict_of_non_repeated_drugs[i]:\n if j in self.drugsdict[0]:\n robot_for_packs_dict[i].append(0)\n elif j in self.drugsdict[1]:\n robot_for_packs_dict[i].append(1)\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = set(robot_for_packs_dict[i])\n\n for i in range(len(self.df)):\n 
robot_for_packs_dict[i] = list(more_itertools.collapse(robot_for_packs_dict[i]))\n print('\\n')\n print(\"clusterdict_of_non_repeated_drugs\", robot_for_packs_dict)", "def tree_idx2(treeG,k1,J1,J2):\n g = treeG[J1]['clusters'][k1]\n if(J1>J2+1):\n for j in np.arange(J2+1, J1)[::-1]:\n g1 = []\n for i in np.arange(0,len(g),1):\n g1 = np.array(np.append(g1,treeG[j]['clusters'][g[i]]), dtype = int)\n g = g1\n y = g\n return y", "def hierarchial_clustering(self,k):\r\n\r\n print(colored(\"Performing hierarchial clustering\",color = 'yellow', attrs=['bold']))\r\n self.clustering = AgglomerativeClustering(affinity='euclidean', linkage='ward').fit(self.X)\r\n self.labels = self.clustering.labels_\r\n self.davies_bouldin_score()\r\n print()\r\n print(colored(\"The number of cluster centers formed are %d\\n\" %(self.clustering.n_clusters_),color = 'red', attrs=['bold']))\r\n self.cluster_plot()\r\n return self.labels", "def _set_neighs_array_lvl2(self, key):\n sh = key.shape\n ## If only iss and neighs\n self.idxs = key\n if self.staticneighs:\n self.idxs = np.array(key)\n else:\n len_ks = len(self.ks) if self.ks is not None else 1\n self.ks = range(1) if self.ks is None else self.ks\n self.idxs = np.array([key for k in range(len_ks)])\n self._setted = True\n if sh[0] != len(self.iss):\n self.iss = list(range(sh[0]))", "def build(self,neighborhoods,k=5):\n g_idx = np.zeros(len(neighborhoods),dtype=np.int)\n for i, nn in enumerate(neighborhoods):\n G = Graph(nn,k)\n g_idx[i] = self.encounter(G)\n for i, sig in enumerate(self.sigs):\n if sig not in self.lookup:\n self.lookup[sig] = np.array([],dtype=np.int)\n self.lookup[sig] = np.hstack((self.lookup[sig],np.argwhere(g_idx==self.index[sig]).flatten()))", "def rebuild_indexes(self):\n self.cards = sorted(self.name_to_card.values(), key=lambda card: card.name)\n self.card_sets = sorted(\n self.code_to_card_set.values(), key=lambda cset: cset.release_date\n )\n\n self.set_code_to_printings = collections.defaultdict(list)\n self.card_name_to_printings = collections.defaultdict(list)\n self.set_name_num_mv_to_printings = collections.defaultdict(list)\n\n for printing in self.id_to_printing.values():\n self.set_code_to_printings[printing.set_code].append(printing)\n self.card_name_to_printings[printing.card_name].append(printing)\n # snnm == (set, name, number, multiverseid)\n snnm_index_keys = {\n # pylint: disable=line-too-long\n (\n printing.set_code,\n printing.card_name,\n printing.set_number,\n printing.multiverseid,\n ),\n (printing.set_code, printing.card_name, None, printing.multiverseid),\n (printing.set_code, printing.card_name, printing.set_number, None),\n (printing.set_code, printing.card_name, None, None),\n }\n for key in snnm_index_keys:\n self.set_name_num_mv_to_printings[key].append(printing)\n\n for printings in self.set_code_to_printings.values():\n printings.sort(key=set_code_to_printings_key)\n\n for printings in self.card_name_to_printings.values():\n printings.sort(key=card_name_to_printing_key)\n\n # Build ordered indexes\n self.set_code_to_printing_to_row = {}\n for set_code, printings in self.set_code_to_printings.items():\n self.set_code_to_printing_to_row[set_code] = {\n printing: i for i, printing in enumerate(printings)\n }", "def ssk_from_indices( indices_l, indices_r ):\n return mat[ [[int(il)] for il in indices_l], [int(ir) for ir in indices_r] ]", "def __getitem__(self,k):\n if type(k) is IntType: return self.data[k, 0]\n \n vec = [type(x) is SliceType for x in k]\n \n if True in vec: #suppose only one slice\n 
ii=vec.index(True)\n indices=[]\n k = list(k)\n import numpy\n rep = numpy.zeros((self.dims[ii],), 'd')\n for i in range(self.dims[ii]):\n k[ii] = i\n rep[i] = self.data[self.comp(k), 0]\n return rep\n else:\n return self.data[self.comp(k), 0]", "def _index_q_list_in_k_list(q_list, k_list):\r\n q_list_length = len(q_list)\r\n k_list_length = len(k_list)\r\n for idx in range(k_list_length - q_list_length + 1):\r\n t = [q == k for q, k in zip(q_list, k_list[idx: idx + q_list_length])]\r\n # print(idx, t)\r\n if all(t):\r\n # print(idx)\r\n idx_start = idx\r\n return idx_start", "def two_dim_index(self, k):\n ind_x = k % self.nx\n ind_y = (k - ind_x) / self.nx\n return (int(ind_y), int(ind_x))", "def find_add_srows(lst_no_anno, check_inds, k):\n\n # Logical, which pair of repeats has a length greater than k (T returns 1, F returns 0)\n search_inds = (L[:,4] > k)\n\n # Multipy the starting index of all repeats \"I\" by search_inds\n SI = np.multiply(L[:,0], search_inds)\n\n # Multiply the starting index of all repeats \"J\" by search_inds\n SJ = np.multiply(L[:,2], search_inds)\n\n # Loop over check_inds\n for i in range(check_inds.size):\n ci = check_inds[i] \n \n # Left check: check for CI on the left side of the pairs \n # Check if the starting index of repeat \"I\" of pair of repeats \"IJ\" equals CI\n lnds = (SI == ci) \n print(\"lnds:\", lnds)\n #print(lnds.sum(axis = 0))\n \n # If the sum across (row) is greater than 0 \n if lnds.sum(axis = 0) > 0: \n # Find the 2nd entry of the row (lnds) whose starting index of repeat \"I\" equals CI \n SJ_li = L[lnds, 2] \n #print(\"SJ_li\", SJ_li)\n \n # Used for the length of found pair of repeats \n l_num = SJ_li.shape[0] #Dim 0 corresponds to rows, wouldn't l_num always be 1? \n #print(\"l_num:\", l_num)\n \n # Found pair of repeats on the left side \n one_lsi = L[lnds, 0] #Starting index of found repeat i\n one_lei = L[lnds, 0] + k - 1 #Ending index of found repeat i\n one_lsj = SJ_li #Starting index of found repeat j\n one_lej = SJ_li + k - 1 #Ending index of found repeat j\n one_lk = np.ones((l_num, 1))*k #Length of found pair of repeats, i and j \n l_add = np.concatenate((one_lsi, one_lei, one_lsj, one_lej, one_lk), axis = None)\n #print(\"l_add:\", l_add)\n \n # Found pair of repeats on the right side \n two_lsi = L[lnds, 0] + k #Starting index of found repeat i \n two_lei = L[lnds, 1] #Ending index of ofund repeat i\n two_lsj = SJ_li + k #Starting index of found repeat j \n two_lej = L[lnds, 3] #Ending index of found repeat j\n two_lk = L[lnds, 4] - k #Length of found pair of repeats, i and j \n l_add_right = np.concatenate((two_lsi, two_lei, two_lsj, two_lej, two_lk), axis = None)\n #print(\"l_add_right:\", l_add_right)\n \n # Stack the found rows vertically \n add_rows = np.vstack((l_add, l_add_right))\n \n # Stack all the rows found on the left side of the pairs \n add_rows = np.concatenate((add_rows, add_rows), axis = 0)\n #print(\"add_rows:\", add_rows)\n \n #print()\n # Right Check: check for CI on the right side of the pairs \n # Check if the the starting index of repeat \"J\" of the pair \"IJ\" equals CI\n rnds = (SJ == ci) \n #print(\"rnds:\", rnds)\n\n if rnds.sum(axis = 0) > 0:\n SJ_ri = L[rnds, 0]\n r_num = SJ_ri.shape[0] \n \n # Found pair of repeats on the left side \n one_rsi = SJ_ri #Starting index of found repeat i \n one_rei = SJ_ri + k - 1 #Ending index of found repeat i \n one_rsj = L[rnds, 2] #Starting index of found repeat j\n one_rej = L[rnds, 2] + k - 1 #Ending index of found repeat j \n one_rk = k*np.ones((r_num, 
1)) #Length of found pair or repeats, i and j \n r_add = np.concatenate((one_rsi, one_rei, one_rsj, one_rej, one_rk), axis = None)\n \n # Found pairs on the right side \n two_rsi = SJ_ri + k #Starting index of found repeat i \n two_rei = L[rnds, 1] #Ending index of found repeat i \n two_rsj = L[rnds, 2] + k #Starting index of found repeat j\n two_rej = L[rnds,3] #Ending index of found repeat j \n two_rk = L[rnds, 4] - k #Length of found pair or repeats, i and j \n r_add_right = np.concatenate((two_rsi, two_rei, two_rsj, two_rej, two_rk), axis = None) \n \n # Stack the found rows vertically \n add_rows = np.vstack((r_add, r_add_right))\n \n # Stack all the rows found on the right side of the pairs \n add_rows = np.concatenate((add_rows, add_rows), axis = 0).astype(int)\n #print(add_rows)\n \n return add_rows", "def get_relevant_images_rank(img_lst, img_map, indices, distances, k,operation=\"union\"):\n # k = k \n set_lst = []\n helper = []\n helper2 = []\n for img in img_lst:\n ind_dist = get_similar_imgs_rank(img, img_map, indices, distances, k=k)\n helper.append(ind_dist[0])\n set_lst.append(ind_dist[1])\n helper2.append(set(ind_dist[0]))\n\n # distances = distances[:k]\n helper = sum(helper, [])\n set_lst = sum(set_lst, [])\n\n\n df = pd.DataFrame({\n \"indices\": helper,\n \"distances\": set_lst\n })\n\n if operation == \"union\":\n # imgs = list(set.union(*df[\"indices\"]))\n # print(len(df))\n df = df.drop_duplicates(subset=\"indices\")\n # print(len(df))\n\n df = df.sort_values(\"distances\")\n print(df)\n return df[\"indices\"].values\n if operation == \"intersection\":\n # inter = list(set.intersection(*helper2))\n # print(inter)\n df = df[df[\"indices\"].isin(list(set.intersection(*helper2)))]\n df = df.drop_duplicates(subset=\"indices\")\n df = df.sort_values(\"distances\")\n # print(df)\n return df[\"indices\"].values", "def _find_nearest_neighbors(self, k=15):\n # this isn't running as expected\n # if self.pca_matrix.any():\n # sys.exit(\"Please run reduce matrix dimensions for populate the PCA matrix.\")\n\n # key will represent index for artificial doublet\n # value will hold list of the most similar doublets\n nn_obj = nearest_neighbors.NearestNeighbors(self.pca_matrix, k)\n\n # create set of indices for nearest neighbors to ignore; set contains indices for artificial doublets\n idxs_to_ignore = {\n i for i in range(self.num_cells, self.num_cells + self.num_artifial_doublets)\n }\n for i in range(self.num_cells, self.num_cells + self.num_artifial_doublets):\n neighbors = nn_obj.get_nearest_neighbors(i, idxs_to_ignore)\n neighbors = [\n i for i in neighbors if i[1] < self.num_cells\n ] # only include similarity if that similarity is for a cell barcode\n self.nearest_neighbors_dict[i] = neighbors", "def get_knn_inds(pdist, k=20, remove=False):\n if remove:\n _, knn_inds = torch.topk(pdist, k + 1, largest=False, sorted=False)\n return knn_inds[..., 1:]\n else:\n _, knn_inds = torch.topk(pdist, k, largest=False, sorted=False)\n return knn_inds", "def _get_neighs_slice(self, k_is=[0]):\n neighs = [np.array([range(self.idxs.start, self.idxs.stop,\n self.idxs.step)\n for j in range(len(self.iss))])\n for i in range(len(k_is))]\n neighs = np.array(neighs)\n return neighs", "def indices(self):\n _indices = []\n for h in self.miller.indices():\n _indices.append(self.indices_hkl(*h)[0])\n return _indices", "def _create_idx(self):\n self._idx = {}\n for idx, (L, M, N) in enumerate(self.modes):\n if L not in self._idx:\n self._idx[L] = {}\n if M not in self._idx[L]:\n self._idx[L][M] = 
{}\n self._idx[L][M][N] = idx", "def zernike_Double_Index(nlevels):\n \n\t \n if not (nlevels>=0):\n print('Input parameter nlevels must be >= 0')\n raise AssertionError()\n \n if (nlevels == 0):\n \n m = 0\n n = 0\n \n return n, m\n \n else:\n \n # ++++ Defining layout for row number n and colunmn number m ++++++++\n\n row_n = nlevels+1\n col_m = 2*nlevels +1\n x = np.arange(row_n)\n y = np.arange(-(col_m-1)//2, (col_m+1)//2,1)\n Q = [(i,j) for i in x for j in y]\n #\n\n\n nm_index = []\n \n top = (col_m + 1)/2\n leftside = row_n*col_m - col_m + 1\n rightside = row_n*col_m \n\n k1 = 0; k2 = 0\n\n for i in xrange(top,row_n*col_m+1, 2*col_m):\n\n nm_index.append(Q[i-1])\n s1 = i + col_m + 1\n s2 = i + col_m - 1 \n jj1 = k1\n jj2 = k2\n\n\n while (s2 <= leftside): \n\n nm_index.append(Q[s2-1])\n s2 +=col_m - 1\n jj1 += 1\n jj2 -= 1\n\n leftside +=2\n\n jj1 = k1\n jj2 = k2\n\n while (s1 <= rightside): \n\n # \n nm_index.append(Q[s1-1])\n s1 +=col_m + 1\n jj1 += 1\n jj2 += 1\n\n rightside -=2\n k1 = 0; k2 += 2\n\n n = np.array(nm_index)[:,0]\n m = np.array(nm_index)[:,1]\n\n return n, m", "def __getitem__(self, k):\n # set a pinter\n # in case k non negetive\n if k >= 0:\n # set a pinter\n node = self.head\n # move the pointer to the right position\n for i in range(k):\n if node != self.tail:\n node = node.next\n # raise an error if k > length of list \n else:\n raise IndexError\n return node.data\n # in case k negetive\n else:\n # set a pinter\n node = self.tail\n # move the pointer to the right position\n for i in range(-k // 2):\n if node is not None and node != self.head:\n node = node.skip_back\n # raise an error if abs(k) > length of list \n else:\n raise IndexError\n # return the head if one if k is odd amd pointer move before head\n if node is None:\n if k % 2 == 0:\n return self.head.data\n # raise an error if abs(k) > length of list\n else:\n raise IndexError\n # move the pointer to the next one if k is odd\n if k % 2 == 0:\n node = node.next\n return node.data", "def kAnonymit(g,k):\n rlist=list()\n degreeH=nx.degree_histogram(g)\n## print degreeH\n li=[[] for i in range(len(degreeH))]\n uNode=dict(zip(range(len(degreeH)),li))\n \n gd=g.degree()\n for e in gd:\n uNode[gd[e]].append(e)\n ad=list()\n mi=list()\n su=list() \n sel3(g,3,ad,mi,su)\n nodeAd=list()\n nodeMi=list()\n nodeSu=list() \n for i in ad:\n nodeAd=nodeAd+uNode[i]\n #nodeAd store the graph ID\n for i in mi:\n nodeMi=nodeMi+uNode[i]\n for i in su:\n nodeSu=nodeSu+uNode[i]\n\n print nodeAd,nodeMi,nodeSu\n\n\n i=0#Add edge \n while i<len(nodeAd)-1:\n j=i+1\n while j<len(nodeAd):\n if not g.has_edge(nodeAd[i],nodeAd[j]):\n g.add_edge(nodeAd[i],nodeAd[j])\n rlist.append(nodeAd[i])\n rlist.append(nodeAd[j])\n nodeAd.pop(j)\n nodeAd.pop(i)\n i=i-1\n break\n else:\n j=j+1\n i=i+1\n print nodeAd\n \n i=0#delet edge\n while i<len(nodeMi)-1:\n j=i+1\n while j<len(nodeMi):\n if g.has_edge(nodeMi[i],nodeMi[j]):\n g.remove_edge(nodeMi[i],nodeMi[j])\n rlist.append(nodeMi[i])\n rlist.append(nodeMi[j])\n nodeMi.pop(j)\n nodeMi.pop(i)\n i=i-1\n break\n else:\n j=j+1\n i=i+1\n print nodeMi\n \n i=0#Add edge \n while i<len(nodeSu):\n j=0\n while j<len(nodeAd):\n if not g.has_edge(nodeSu[i],nodeAd[j]):\n g.add_edge(nodeSu[i],nodeAd[j])\n rlist.append(nodeSu[i])\n rlist.append(nodeAd[j])\n nodeAd.pop(j)\n nodeSu.pop(i)\n \n i=i-1\n break\n else:\n j=j+1\n i=i+1\n print nodeSu\n \n i=0#delet edge\n while i<len(nodeSu):\n j=0\n while j<len(nodeMi):\n if g.has_edge(nodeSu[i],nodeMi[j]):\n g.remove_edge(nodeSu[i],nodeMi[j])\n 
rlist.append(nodeSu[i])\n rlist.append(nodeMi[j])\n nodeMi.pop(j)\n nodeSu.pop(i)\n i=i-1\n break\n else:\n j=j+1\n i=i+1\n print nodeSu\n return rlist", "def generate_L(data_set, k, min_support):\r\n support_data = {}\r\n C1 = create_C1(data_set)\r\n L1 = generate_Lk_by_Ck(data_set, C1, min_support, support_data)\r\n Lksub1 = L1.copy()\r\n L = []\r\n L.append(Lksub1)\r\n for i in range(2, k+1):\r\n Ci = create_Ck(Lksub1, i)\r\n Li = generate_Lk_by_Ck(data_set, Ci, min_support, support_data)\r\n Lksub1 = Li.copy()\r\n L.append(Lksub1)\r\n return L, support_data", "def _set_neighs_array_lvl1(self, key):\n #sh = key.shape\n ## If only array of neighs\n if self.staticneighs:\n self.idxs = np.array([key for i in range(len(self.iss))])\n else:\n self.ks = range(1) if self.ks is None else self.ks\n len_ks = len(self.ks)\n self.idxs = np.array([[key for i in range(len(self.iss))]\n for i in range(len_ks)])\n self._setted = True", "def cluster_dpc_knn(token_dict, cluster_num, k=5, token_mask=None):\n with torch.no_grad():\n x = token_dict['x']\n B, N, C = x.shape\n dist_matrix = torch.cdist(x, x) / C ** 0.5\n if token_mask is not None:\n token_mask = token_mask > 0\n dist_matrix = dist_matrix * token_mask[:, None, :] + (dist_matrix.max() + 1) * ~token_mask[:, None, :]\n dist_nearest, index_nearest = torch.topk(dist_matrix, k=k, dim=-1, largest=False)\n density = (-(dist_nearest ** 2).mean(dim=-1)).exp()\n density = density + torch.rand(density.shape, device=density.device, dtype=density.dtype) * 1e-06\n if token_mask is not None:\n density = density * token_mask\n mask = density[:, None, :] > density[:, :, None]\n mask = mask.type(x.dtype)\n dist_max = dist_matrix.flatten(1).max(dim=-1)[0][:, None, None]\n dist, index_parent = (dist_matrix * mask + dist_max * (1 - mask)).min(dim=-1)\n score = dist * density\n _, index_down = torch.topk(score, k=cluster_num, dim=-1)\n dist_matrix = index_points(dist_matrix, index_down)\n idx_cluster = dist_matrix.argmin(dim=1)\n idx_batch = torch.arange(B, device=x.device)[:, None].expand(B, cluster_num)\n idx_tmp = torch.arange(cluster_num, device=x.device)[None, :].expand(B, cluster_num)\n idx_cluster[idx_batch.reshape(-1), index_down.reshape(-1)] = idx_tmp.reshape(-1)\n return idx_cluster, cluster_num", "def get_knn_all(self, query_idxs, k=None):\n k = self.args.k if k is None else k\n assert get_rank() == 0\n in_query_mask = np.isin(self.idxs, query_idxs)\n assert np.sum(in_query_mask) == query_idxs.size\n in_query_X = self.X[in_query_mask]\n _, I = self._build_and_query_knn(self.X, in_query_X, k+1)\n remap = lambda i : self.idxs[i]\n v_remap = np.vectorize(remap)\n I = v_remap(I)\n return I[:,1:]", "def generate_Lk_by_Ck(fptree, data_set, Ck,dim, min_support, support_data):\n Lk = set()\n item_count = {}\n t_num = float(len(data_set))\n if dim == 1:\n for t in data_set:\n for item in Ck:\n if item.issubset(t):\n if item not in item_count:\n item_count[item] = 1\n else:\n item_count[item] += 1\n t_num = float(len(data_set))\n else:\n for item in Ck:\n array = list(item)\n subData, support = fptree.getSubData(array[len(array) - 1], dim)\n for i in range(0, len(subData)):\n if item.issubset(subData[i]):\n if item not in item_count:\n item_count[item] = support[i]\n else:\n item_count[item] += support[i]\n for item in item_count:\n if (item_count[item] / t_num) >= min_support:\n Lk.add(item)\n support_data[item] = item_count[item] / t_num\n\n return Lk", "def caesuras(k):\n caesuras = []\n for idx, i in enumerate(k):\n if i in k[idx + 1:]:\n caesuras.append(idx)\n 
return caesuras", "def learn_MAP(self,knowledge,k=10):\n \n gradients = {}\n for i in range(k):\n\n #create TILDE(R) tree object\n tree_i = TILDE(typ=\"regression\",score=\"WV\",max_depth=self.max_depth)\n\n #subsample negatives if too many for each tree\n sampled_neg = deepcopy(self.neg)\n if len(self.neg) > 2*len(self.pos):\n sampled_neg = sample(self.neg,2*len(self.pos))\n\n #compute gradients as I-P\n for ex in self.examples:\n parameter = knowledge.calculate_parameter(self.data,\n ex,\n self.examples[ex])\n p = sigmoid(self.examples[ex])\n if ex in self.pos:\n gradients[ex] = 1-p - parameter\n elif ex in sampled_neg:\n gradients[ex] = 0-p - parameter\n\n #fit tree on gradients\n tree_i.learn(self.data,self.bk,self.target,examples=gradients)\n \n #recompute example values as previous example value + tree_i value\n for ex in self.examples:\n tree_i_value = tree_i.infer(self.data,ex)\n self.examples[ex] += tree_i_value\n\n #add tree to boosted_trees\n self.boosted_trees.append(tree_i)", "def reindex(self):\n self._index = {w: i for i, w in enumerate(self._words)}\n self.n, self.d = self._vecs.shape\n assert self.n == len(self._words) == len(self._index)\n self._neighbors = None", "def update_idx(self):\n self.idx = (self.F * self.FMUL +\n self.E * self.EMUL +\n self.Z * self.ZMUL +\n self.A * self.AMUL +\n self.B * self.BMUL )", "def indices(self):\n if self._indices is None:\n i = []\n\n # TODO: this is not right for multi-column keys\n # TODO: new style indexes\n\n global_name = '^DD(%s,0,\"IX\",\"0\")' % self.fileid\n prefix = '^DD(%s,0,\"IX\",' % self.fileid\n while 1:\n global_name = M.mexec('set s0=$query(%s)' % global_name, M.INOUT(\"\"))[0]\n if not global_name or not global_name.startswith(prefix):\n break\n suffix = global_name[len(prefix):-1]\n parts = suffix.split(\",\")\n idx_name = parts[0][1:-1]\n idx_table = parts[1]\n idx_columns = parts[2:]\n index = Index(idx_name, idx_table, idx_columns)\n i.append(index)\n\n # A second list, gives indices for a field\n columns = {}\n for idx in i:\n for c in idx.columns:\n columns[c] = 1\n\n # Now trawl the listed columns in the data dictionary, and load their\n # cross references.\n cr_names = {}\n for c in columns.keys():\n idx_root = M.Globals[\"^DD\"][self.fileid][c][1]\n if not idx_root[0].exists():\n continue\n for cr_id, val in idx_root.keys_with_decendants():\n if float(cr_id) > 0:\n cr_header = idx_root[cr_id][0].value\n parts = cr_header.split(\"^\")\n if len(parts) == 2 and parts[1]: # if more than 2 parts, assume MUMPs trigger\n f = cr_names.get(parts[1], list())\n f.append(c)\n cr_names[parts[1]] = f\n\n # Now, just delete items from the index list if they are not in cr_names\n self._indices = []\n for index in i:\n cr = cr_names.get(index.name)\n if cr:\n # verify columns - lots of errors in real systems\n if len(cr) == len(index.columns):\n invalid = False\n for c in cr:\n if c not in index.columns:\n invalid = True\n continue\n if not invalid:\n self._indices.append(index)\n\n return self._indices", "def learn(self,k=10):\n \n gradients = {}\n for i in range(k):\n\n #create TILDE(R) tree object\n tree_i = TILDE(typ=\"regression\",score=\"WV\",max_depth=self.max_depth)\n\n #subsample negatives if too many for each tree\n sampled_neg = deepcopy(self.neg)\n if len(self.neg) > 2*len(self.pos):\n sampled_neg = sample(self.neg,2*len(self.pos))\n\n #compute gradients using LMNN loss function\n for ex in self.examples:\n gradient = self.compute_gradient(ex,\n self.pos,\n self.neg,\n self.examples)\n gradients[ex] = 
gradient\n\n\n #fit tree on gradients\n tree_i.learn(self.data,self.bk,self.target,examples=gradients)\n \n #recompute example values as previous example value - gamma*tree_i value\n for ex in self.examples:\n tree_i_value = tree_i.infer(self.data,ex)\n self.examples[ex] -= 0.01*tree_i_value #learning rate\n \n #add tree to boosted_trees\n self.boosted_trees.append(tree_i)", "def get_k_fold(examples, labels, k=10):\n example_fold = []\n label_fold = []\n interval = int(len(examples)/k)\n for i in range(k):\n \t#f_examples = [examples[j] for j in range(len(examples)) if j%k == i]\n #f_labels = [labels[j] for j in range(len(labels)) if j%k == i]\n f_examples = [examples[j] for j in range(interval*i,interval*(i+1))]\n f_labels = [labels[j] for j in range(interval*i,interval*(i+1))]\n example_fold.append(f_examples)\n label_fold.append(f_labels)\n return example_fold, label_fold", "def _set_neighs_slice(self, key):\n ## Condition to use slice type\n self._constant_neighs = True\n self.ks = range(1) if self.ks is None else self.ks\n ## Possible options\n if key is None:\n self.idxs = slice(0, self._n, 1)\n elif isinstance(key, slice):\n start = 0 if key.start is None else key.start\n stop = self._n if key.stop is None else key.stop\n stop = self._n if key.stop > 10*16 else key.stop\n step = 1 if key.step is None else key.step\n self.idxs = slice(start, stop, step)\n elif type(key) in inttypes:\n self.idxs = slice(0, key, 1)\n elif type(key) == tuple:\n self.idxs = slice(key[0], key[1], 1)\n self._setted = True", "def dilatation_matrice(kx,ky,M):\n\n # kx,ky entiers\n n = len(M) # Nb de lignes\n p = len(M[0]) # Nb de colonnes\n N = [[0 for j in range(kx*p)] for i in range(ky*n)]\n for i in range(ky*n):\n for j in range(kx*p):\n N[i][j] = M[i//ky][j//kx]\n return N", "def index_wrap(self, k):\n return (self.first_player + k) % self.num_players", "def prepare_data_matrix():\n # create matrix X and list of languages\n\n lds = {}\n for fn in listdir(\"clustering\"):\n if fn.lower().endswith(\".txt\"):\n with open(join(\"clustering\", fn), encoding=\"utf8\") as f:\n text = f.read()\n nter = terke(text, n=3)\n lds[fn] = nter\n #print(lds.keys())\n \n #lds is a dictionary of dictionaries: {\"slovenian.txt\": {\"abc\":3,\"efg\":4...}, \"macedonian.txt\":{\"efg\":6...},...}\n l=listOfTuples(lds) #list of strings\n #print(l[:100])\n languages = list(lds.keys()) # ['Slo', 'Mac', ]\n # which language represents row number i: languages[i]\n # which row does language s represent: languagues.index(s)\n X=np.zeros([len(languages),100])\n for i in range(len(languages)):\n #print(languages[i])\n count = 0\n for j in range(100):\n if l[j] in lds[languages[i]]:\n X[i,j]=lds[languages[i]][l[j]]\n count += 1\n # print(count)\n\n #print([sum(x) for x in X])\n \n return X, languages\n # X, languages = prepare_data_matrix()", "def exercise_indexes():\n print(exercise_indexes.__doc__)\n print(\"The indexes of 'data' are:\", data.index)\n print(data, \"\\n\")\n print(\"Changing the indexes of 'data'\")\n print(data.reindex([2, 0, 1]), \"\\n\")\n print(\"Changing the indexes of 'data' randomly\")\n print(data.reindex(np.random.permutation(data.index)))", "def Get(self,k:int): \n ### get partitions depending on the partition schemes C that depends on k!\n return subsets_k(list(range(self._n)),k)", "def set_neighbours(self,knodes):\n self.neighbours = []\n for kn in knodes:\n # Make sure we don't have ourselves as a neighbour:\n if kn.ident == self.ident:\n continue\n # A neighbour has a path length 1:\n 
self.neighbours.append(\\\n kn._replace(path_len=1))\n\n\n # Update known nodes:\n self.add_known_nodes(0,self.neighbours)", "def fK(dO):\n dO.begin()\n fL()\n dO.end()", "def kl_pairs(self, n_kls):\n kl_grid = np.indices((n_kls*2+1, n_kls*2+1))-n_kls\n return kl_grid.reshape(2, (n_kls*2+1)**2).transpose()", "def k_neighbors(self, unknown, dataset, k):\n distances = []\n for title in dataset:\n point = dataset[title]\n distance_to_point = distance.euclidean_distance(point, unknown)\n distances.append([distance_to_point, title])\n distances.sort()\n neighbors = distances[0:k]\n return neighbors", "def ref_main(l, k):\n S = sum(binomial(k, z)*(2*z-k)**l for z in range(k+1))\n return S / 2**k", "def ref_main(l, k):\n S = sum(binomial(k, z)*(2*z-k)**l for z in range(k+1))\n return S / 2**k", "def gen_k_ary_ind_from_cliques(k: int, E: Iterable[Edge]) -> FrozenSet[Edge]:\n result = set()\n for i in E:\n result.update(map(Edge, itertools.permutations(i, k)))\n return frozenset(result)", "def _query(self, data, index, k):\n if self.approx_nearest_neighbors:\n nn_indices, nn_distances = index.query(data, k=k)\n else:\n nn_distances, nn_indices = index.kneighbors(data, n_neighbors=k)\n\n return nn_indices, nn_distances", "def _set_neighs_array_lvl3(self, key):\n self.idxs = np.array(key)\n self.ks = range(len(self.idxs)) if self.ks is None else self.ks\n if self.staticneighs:\n self.idxs = np.array(key[0])\n if len(self.idxs) != len(self.iss):\n self.iss = list(range(len(self.idxs)))\n else:\n if len(self.idxs[0]) != len(self.iss):\n self.iss = list(range(len(self.idxs[0])))\n self._setted = True", "def build_knn(coords, k=6, **kwargs):\n \n tree = BallTree(coords, **kwargs)\n _, ind = tree.query(coords, k=k+1) # the first k is \"oneself\"\n pairs = pairs_from_knn(ind)\n return pairs", "def kruskal(Grafo,diferencia):\n edges = list()\n #print(diferencia,\"la diferencia\" )\n for i in range(len(Grafo)): # collect the edges in G\n for v,w in Grafo[i]:\n if (w!=-1):\n edges.append((i,v,w))\n # sort the edges in ascending order w.r.t weights in the edges\n edges.sort(key=lambda x: x[2])## se organiza por peso \n ans,sans = [ list() for i in range(len(Grafo)) ],0\n df = dforest(len(Grafo))\n i = 0\n contador=0\n while i!=len(edges):\n u,v,w = edges[i]\n if df.find(u)!=df.find(v):\n df.union(u,v)\n contador+=1\n if(contador==diferencia):\n #print (w,\"pinche w\")\n return w\n\n i += 1", "def d_id(a, k):\n import numpy as np\n rows, cols = np.diag_indices_from(a)\n if k < 0:\n return rows[:k], cols[-k:]\n elif k > 0:\n return rows[k:], cols[:-k]\n else:\n return rows, cols", "def get_k_neighbors(self, point):\n nn = []\n nnl = []\n for p,l in zip(self.train_features,self.train_labels):\n d = self.distance_function(p,point)\n dl_pair = (d,l)\n nn.append(dl_pair)\n nn = sorted(nn, key = lambda x: x[0])\n for i in range(0,self.k):\n nnl.append(nn[i][1])\n return nnl\n raise NotImplementedError", "def build_k_indices(y, k_fold, seed):\n num_row = y.shape[0]\n interval = int(num_row / k_fold)\n np.random.seed(seed)\n indices = np.random.permutation(num_row)\n k_indices = [indices[k * interval: (k + 1) * interval] for k in range(k_fold)]\n return np.array(k_indices)", "def _component_kl_distances(self):\n\n K = self.weight.size\n if K == 1: return ([])\n\n kl = np.inf * np.ones((K, K))\n\n for i in range(K):\n for j in range(i + 1, K):\n kl[i, j] = kullback_leibler_for_multivariate_normals(\n self.mean[i], self.covariance[i],\n self.mean[j], self.covariance[j])\n kl[j, i] = 
kullback_leibler_for_multivariate_normals(\n self.mean[j], self.covariance[j],\n self.mean[i], self.covariance[i])\n\n # Best for each *from*.\n indices = list(zip(*(np.arange(K), np.argsort(kl, axis=1).T[0])))\n\n _ = np.array(indices).T\n sorted_indices = np.argsort(kl[_[0], _[1]])\n return tuple([indices[_] for _ in sorted_indices if indices[_][0] != indices[_][1]])\n\n return foo", "def knn0(pnts, p, k):\r\n p = np.asarray(p)\r\n pnts = np.asarray(pnts)\r\n diff = pnts - p[np.newaxis, :]\r\n d = np.einsum('ij,ij->i', diff, diff)\r\n idx = np.argsort(d)[:k]\r\n# s = [i.tolist() for i in pnts[idx]]\r\n return pnts[idx].tolist()", "def test_k_index():\n pressure = np.array([1014., 1000., 997., 981.2, 947.4, 925., 914.9, 911.,\n 902., 883., 850., 822.3, 816., 807., 793.2, 770.,\n 765.1, 753., 737.5, 737., 713., 700., 688., 685.,\n 680., 666., 659.8, 653., 643., 634., 615., 611.8,\n 566.2, 516., 500., 487., 484.2, 481., 475., 460.,\n 400.]) * units.hPa\n temperature = np.array([24.2, 24.2, 24., 23.1, 21., 19.6, 18.7, 18.4,\n 19.2, 19.4, 17.2, 15.3, 14.8, 14.4, 13.4, 11.6,\n 11.1, 10., 8.8, 8.8, 8.2, 7., 5.6, 5.6,\n 5.6, 4.4, 3.8, 3.2, 3., 3.2, 1.8, 1.5,\n -3.4, -9.3, -11.3, -13.1, -13.1, -13.1, -13.7, -15.1,\n -23.5]) * units.degC\n dewpoint = np.array([23.2, 23.1, 22.8, 22., 20.2, 19., 17.6, 17.,\n 16.8, 15.5, 14., 11.7, 11.2, 8.4, 7., 4.6,\n 5., 6., 4.2, 4.1, -1.8, -2., -1.4, -0.4,\n -3.4, -5.6, -4.3, -2.8, -7., -25.8, -31.2, -31.4,\n -34.1, -37.3, -32.3, -34.1, -37.3, -41.1, -37.7, -58.1,\n -57.5]) * units.degC\n ki = k_index(pressure, temperature, dewpoint)\n assert_almost_equal(ki, 33.5 * units.degC, 2)", "def reindex(self):", "def reindex(self):", "def build_k_indices(y, k_fold, seed):\n num_row = y.shape[0]\n interval = int(num_row / k_fold)\n np.random.seed(seed)\n indices = np.random.permutation(num_row)\n k_indices = [indices[k * interval: (k + 1) * interval]\n for k in range(k_fold)]\n return np.array(k_indices)", "def build_k_indices(y, k_fold, seed):\n num_row = y.shape[0]\n interval = int(num_row / k_fold)\n np.random.seed(seed)\n indices = np.random.permutation(num_row)\n k_indices = [indices[k * interval: (k + 1) * interval]\n for k in range(k_fold)]\n return np.array(k_indices)", "def show_k():\n\n page = request.args.get('page', 1, type=int)\n knowledges_ids = Knowledge.query.order_by(Knowledge.id.asc()).paginate(\n page, current_app.config['PAGE_ITEMS'], False)\n\n k = \"myK000\"\n\n knowledges_list = [(f'{k}{i.id}' if (i.id < 10) else f'{\"myK00\"}{i.id}'\n if(i.id < 100) else f'{\"myK0\"}{i.id}', i.description) for i in knowledges_ids.items]\n\n verK = True\n fileDir = os.path.dirname(os.path.realpath('__file__'))\n\n # me tengo que meter a la ruta base/cyber_role y ejecutar este endpoint\n file_json = 'cyber_role/KSAT_JSON/Knowledges.json'\n\n if not isfile(join(fileDir, file_json)):\n file_json = 'KSAT_JSON/Knowledges.json'\n\n with open(file_json) as file:\n # Obtenemos el json del fichero\n data = json.load(file)\n\n equivalencia_nist = {}\n # ya tenemos el diccionario del nist, original\n values = list(data.values())\n keys = list(data.keys())\n\n for i in knowledges_ids.items:\n if i.description in values:\n equivalencia_nist[i.id] = keys[values.index(i.description)]\n\n\n return render_template('general/ksat.html', title='Knowledges',\n lista_K=knowledges_ids, l_K=knowledges_list,\n l_eq=list(equivalencia_nist.values()), verK=verK)", "def findIndex(cluster_size, cluster_idx, tracked_clusters, index_tracked_clusters):\n \n print 'Update is conducting'\n # 
Calculate the largest 5 clusters\n size_arr = np.asarray(cluster_size)\n maxs = -bot.partsort(-size_arr, 5)[:5] # 5 largest clusters\n maxs = sorted(maxs, reverse=True)\n \n # Index the largest 5 clusters\n for i, cs in enumerate(cluster_size):\n if cs == maxs[0]:\n index_tracked_clusters[0] = i \n tracked_clusters[0] = cluster_idx[i]\n continue\n elif cs == maxs[1]:\n index_tracked_clusters[1] = i\n tracked_clusters[1] = cluster_idx[i]\n continue\n elif cs == maxs[2]:\n index_tracked_clusters[2] = i\n tracked_clusters[2] = cluster_idx[i]\n continue\n elif cs == maxs[3]:\n index_tracked_clusters[3] = i\n tracked_clusters[3] = cluster_idx[i]\n continue\n elif cs == maxs[4]:\n index_tracked_clusters[4] = i\n tracked_clusters[4] = cluster_idx[i] \n continue", "def build_k_indices(y, k_fold, seed):\n num_rows = y.shape[0]\n interval = int(num_rows / k_fold)\n np.random.seed(seed)\n indices = np.random.permutation(num_rows)\n k_indices = [indices[k * interval: (k + 1) * interval] for k in range(k_fold)]\n return np.array(k_indices)", "def key_klifs_residues(numbering):\n if numbering == None:\n print(\"The structure was not found in the klifs database.\")\n key_res = None\n return key_res\n\n key_res = dict() #initialize key_res (which read from the 0-based numbering list)\n for i in range(5):\n key_res[f'group{i}'] = list()\n ## feature group 0: A-loop backbone dihedrals\n key_res['group0'].append(numbering[83]) # start of A-loop\n\n ## feature group 1: P-loop backbone dihedrals\n key_res['group1'].append(numbering[3]) # res0 in P-loop\n key_res['group1'].append(numbering[4]) # res1 in P-loop\n key_res['group1'].append(numbering[5]) # res2 in P-loop\n key_res['group1'].append(numbering[6]) # res3 in P-loop\n key_res['group1'].append(numbering[7]) # res4 in P-loop\n key_res['group1'].append(numbering[8]) # res5 in P-loop\n\n ## feature group 2: aC-related features\n #angle between aC and aE helices\n key_res['group2'].append(numbering[19]) # res0 in aC\n key_res['group2'].append(numbering[29]) # res10 in aC\n key_res['group2'].append(numbering[62]) # end of aE\n\n # key salt bridge\n key_res['group2'].append(numbering[16]) # K in beta III\n key_res['group2'].append(numbering[23]) # E in aC\n\n ## feature group 3: DFG-related features\n key_res['group3'].append(numbering[79]) # X-DFG\n key_res['group3'].append(numbering[80]) # DFG-Asp\n key_res['group3'].append(numbering[81]) # DFG-Phe\n key_res['group3'].append(numbering[27]) # ExxxX\n\n ## feature group 4: the FRET distance\n # not in the list of 85 (equivalent to Aura\"S284\"), use the 100% conserved beta III K as a reference\n key_res['group4'].append(numbering[16] + 120)\n\n # not in the list of 85 (equivalent to Aura\"L225\"), use the 100% conserved beta III K as a reference\n key_res['group4'].append(numbering[16] + 61)\n\n return key_res", "def kookurrenz_matrix(text, stoppwoerter, nachbarn_anzahl, häufigkeits_liste, vectorizer=TfidfVectorizer, gleiches_wort_null=False):\n vocab = vokabular_erstellen(häufigkeits_liste)\n nachbarn = nachbarn_aller_woerter(text, size=nachbarn_anzahl)\n c_vectorizer = vectorizer(stop_words=stoppwoerter, vocabulary=vocab)\n term_document_matrix = c_vectorizer.fit_transform(nachbarn)\n term_term_matrix = (term_document_matrix.T * term_document_matrix)\n \n if gleiches_wort_null:\n term_term_matrix.setdiag(0)\n \n \n ###\n # EVTL. 
AENDERN!!\n ###\n dense_term_term_matrix = term_term_matrix.todense() \n\n return dense_term_term_matrix\n #return term_term_matrix", "def _general_get_information(self, k=None):\n ## Format k\n ks = self.get_k(k)\n idx_ks = self._get_k_indices(ks)\n ## Get iss\n iss = self.iss\n ## Format idxs\n assert(type(idx_ks) == list)\n neighs = self.get_neighs(idx_ks)\n sp_relative_pos = self.get_sp_rel_pos(idx_ks)\n self.check_output_standards(neighs, sp_relative_pos, ks, iss)\n# print '3'*50, neighs, sp_relative_pos, ks, iss\n return neighs, sp_relative_pos, ks, iss", "def learn(self,k=10):\n \n gradients = {}\n for i in range(k):\n\n #create TILDE(R) tree object\n tree_i = TILDE(typ=\"regression\",score=\"WV\",max_depth=self.max_depth)\n\n #subsample negatives if too many for each tree\n sampled_neg = deepcopy(self.neg)\n if len(self.neg) > 2*len(self.pos):\n sampled_neg = sample(self.neg,2*len(self.pos))\n\n #compute gradients as I-P\n for ex in self.examples:\n p = sigmoid(self.examples[ex])\n if ex in self.pos:\n gradients[ex] = 1-p\n elif ex in sampled_neg:\n gradients[ex] = 0-p\n\n #fit tree on gradients\n tree_i.learn(self.data,self.bk,self.target,examples=gradients)\n \n #recompute example values as previous example value + tree_i value\n for ex in self.examples:\n tree_i_value = tree_i.infer(self.data,ex)\n self.examples[ex] += 0.01*tree_i_value\n\n #add tree to boosted_trees\n self.boosted_trees.append(tree_i)", "def rotate(self, nums:[int], k: int) -> None:\n l = len(nums)\n k = k%l\n \n if k == l:\n return \n \n \n index = 0\n i = 0\n \n while index < l:\n a = b = i\n b = (i+k)%l\n tmp2 = nums[a]\n\n while b != i:\n tmp1 = tmp2\n tmp2 = nums[b]\n nums[b] = tmp1\n a = b\n b = (b+k)%l\n index += 1\n nums[i] = tmp2\n i += 1\n index += 1", "def dyad_to_index(j, k):\n return 2**j + k", "def find_new_kbl(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n z_u_r = self.grid_dict['z_u_r']\n\n #---> j loop\n for j in range(Ly):\n self.kbl[j] = N #initialize search at top\n\n # in fortran k=N-1,1,-1\n for k in range(N-1,0,-1):\n #INDEX MAP\n k_w = k\n k_r = k-1\n \n for j in range(Ly):\n if z_u_w[j,k_w] > z_u_w[j,N] - self.hbls[j]:\n self.kbl[j] = k_w", "def get_2away_pairs(local_index_to_kmer, k):\n\n #These are the base cases for the recursion. If k==1, the kmers obviously can't differ in exactly two bases, so return an empty list. 
if k==2, return every pair of indices where the kmers at those indices differ at exactly two bases.\n if k == 1:\n return []\n if k == 2:\n return [(i, j) for (i,j) in combinations(local_index_to_kmer, 2) if local_index_to_kmer[i][0] != local_index_to_kmer[j][0] and local_index_to_kmer[i][1] != local_index_to_kmer[j][1]]\n\n #Get the two halves of the kmer\n k_L = k//2\n k_R = k-k_L\n\n #initialize dictionaries in which the key is the hash of half of the kmer, and the value is a list of indices of the kmers with that same hash\n kmer_L_hashes = defaultdict(list)\n kmer_R_hashes = defaultdict(list)\n\n #initialize pairs, which will be returned by get_1away_pairs\n pairs = []\n\n #initialize dictionaries containing the left halves and the right halves (since we will have to check cases where the left half differs by 1 and the right half differs by 1)\n local_index_to_kmer_L = {}\n local_index_to_kmer_R = {}\n\n #for each kmer, calculate its left hash and right hash, then add its index to the corresponding entries of the dictionary\n for i, kmer in local_index_to_kmer.items():\n kmer_L = kmer[:k_L]\n kmer_R = kmer[k_L:]\n local_index_to_kmer_L[i] = kmer_L\n local_index_to_kmer_R[i] = kmer_R\n kmer_L_hashes[kmer_to_int(kmer_L)] += [i]\n kmer_R_hashes[kmer_to_int(kmer_R)] += [i]\n\n #for each left hash in which there are multiple kmers with that left hash, find the list of pairs in which the right half differs by 2. (aka, if left half matches, recurse on right half).\n for kmer_L_hash_indices in kmer_L_hashes.values(): #same in first half\n if len(kmer_L_hash_indices) > 1:\n pairs += get_2away_pairs({kmer_L_hash_index:local_index_to_kmer[kmer_L_hash_index][k_L:] for kmer_L_hash_index in kmer_L_hash_indices}, k_R) #differ by 2 in right half\n\n #for each right hash in which there are multiple kmers with that right hash, find the list of pairs in which the left half differs by 2. 
(aka, if right half matches, recurse on left half).\n for kmer_R_hash_indices in kmer_R_hashes.values(): #same in second half\n if len(kmer_R_hash_indices) > 1:\n pairs += get_2away_pairs({kmer_R_hash_index:local_index_to_kmer[kmer_R_hash_index][:k_L] for kmer_R_hash_index in kmer_R_hash_indices}, k_L) #differ by 2 in left half\n\n #Find matching pairs where the left half is one away, and the right half is one away\n possible_pairs_L = set(get_1away_pairs(local_index_to_kmer_L,k_L))\n possible_pairs_R = set(get_1away_pairs(local_index_to_kmer_R,k_R))\n pairs += list(possible_pairs_L.intersection(possible_pairs_R))\n return(pairs)", "def _get_neighs_array_dynamic(self, k_is=[0]):\n neighs = self.idxs[k_is, :, :]\n return neighs", "def find_linked_clusters(self, data, index, cluster, linked_indices, re = False):\n\n if np.size(linked_indices) != 0.0:\n linked_clusters = [self.cluster_arr[1,ID] for ID in linked_indices \\\n if self.cluster_arr[1,ID] != -1]\n else:\n linked_clusters = []\n\n if len(linked_clusters) != 0:\n # Initial clustering\n\n if re is False:\n\n # Identify largest common ancestor of the linked clusters - the\n # antecessor.\n linked_clusters = [self.clusters[ID].antecessor for ID in linked_clusters]\n linked_clusters = remdup_preserve_order(linked_clusters)\n\n # Check to see if the data point satisfies the local conditions\n if self.method==1:\n linked_clusters = local_links(self, index, data, cluster, linked_clusters, re=re)\n\n # Check to see if the data point satisfied the global conditions\n if len(linked_clusters)>1:\n var = []\n for link in linked_clusters:\n var = get_var(self, data, cluster, link, var)\n linked_clusters, var = remove_outliers(self, data, cluster, linked_clusters, var, 5., 7.)\n\n else:\n # Relax phase\n\n # Get the linked clusters\n linked_clusters = [self.clusters[ID] for ID in linked_clusters]\n linked_clusters = remdup_preserve_order(linked_clusters)\n\n # Check to see if the data point satisfied the local conditions\n if self.method==1:\n linked_clusters = local_links(self, index, data, cluster, linked_clusters, re=re)\n\n if len(linked_clusters) >= 1:\n # Check to see if the data point satisfied the global conditions\n var = []\n for link in linked_clusters:\n var = get_var(self, data, cluster, link, var)\n linked_clusters, var = remove_outliers(self, data, cluster, linked_clusters, var, 5., 7.)\n\n # Now identify where the data point can be slotted into an already\n # established hierarchy\n antecessors = [link.antecessor for link in linked_clusters]\n antecessors = remdup_preserve_order(antecessors)\n antecessors = sorted(antecessors, key=get_cluster_idx, reverse=True)\n\n # Find out where the data point can be slotted in to an already\n # established hierarchy. This is based on the floor/ceiling\n # intensity level of the cluster in question. 
If you link\n # incorrectly then the hierarchy makes no sense.\n if len(antecessors)==1:\n linked_clusters = find_linked_clusters_single_antecessor(self, data, cluster, linked_clusters)\n else:\n linked_clusters = find_linked_clusters_multiple_antecessors(self, data, cluster, linked_clusters, antecessors)\n\n # If method = PPV then we need to check the linked clusters to prevent\n # velocity components from the same position from being linked to the\n # same cluster\n if len(linked_clusters) >= 1.0:\n if self.method == 1:\n if re is False:\n linked_clusters = multi_component_check(self, data, cluster, linked_clusters)\n else:\n linked_clusters = multi_component_check(self, data, cluster, linked_clusters, re = re )\n\n linked_clusters = sorted(linked_clusters, key=get_cluster_idx, reverse=True)\n\n return linked_clusters", "def make_idx_data(dataset, word_idx_map, k=300):\n for i in xrange(len(dataset['y'])):\n dataset['c'][i] = get_idx_from_sent(dataset['c'][i], word_idx_map, k)\n dataset['r'][i] = get_idx_from_sent(dataset['r'][i], word_idx_map, k)", "def neighbor_list(i, j, k, nx):\n left_center = (i-1, j, k)\n right_center = (i+1, j, k)\n top_center = (i, j+1, k)\n bottom_center = (i, j-1, k)\n left_up = (i, j, k + 1)\n left_down = (i, j, k -1)\n return np.mod([left_center, right_center, top_center, bottom_center, left_up, left_down], nx)", "def key_root_indices(self, lld_indices):\r\n # Cf. Zhang & Shasha:p.1251: \"LR_keyroots(T) = {k| there exists no k'>k\r\n # such that l(k)=l(k')}\r\n def get_number_leafs(lld_indices):\r\n counter = 0\r\n for i in range(len(lld_indices)):\r\n if i == lld_indices[i]:\r\n counter = counter + 1\r\n return counter\r\n \r\n kr = [0]*get_number_leafs(lld_indices)\r\n visited = [False]*len(lld_indices)\r\n k = len(kr)-1\r\n i = len(lld_indices)-1\r\n while k >= 0:\r\n if not visited[lld_indices[i]]:\r\n kr[k] = i\r\n k = k - 1\r\n visited[lld_indices[i]] = True\r\n i = i - 1\r\n return sorted(kr)", "def test_get_ijk_list():\n\n l0 = get_ijk_list(0)\n assert l0 == [\n [0, 0, 0]\n ]\n\n l1 = get_ijk_list(1)\n assert l1 == [\n [1, 0, 0],\n [0, 1, 0],\n [0, 0, 1]\n ]\n\n l2 = get_ijk_list(2)\n assert l2 == [\n [2, 0, 0],\n [1, 1, 0],\n [1, 0, 1],\n [0, 2, 0],\n [0, 1, 1],\n [0, 0, 2]\n ]", "def _set_neighs_general_list(self, key):\n ### WARNING: NOT WORK WITH EMPTY NEIGHS\n if '__len__' not in dir(key):\n self._set_neighs_number(key)\n else:\n if len(key) == 0:\n self._set_neighs_list_only(key)\n elif '__len__' not in dir(key[0]):\n self._set_neighs_list_only(key)\n else:\n if all([len(key[i]) == 0 for i in range(len(key))]):\n self._setted = False\n if self.staticneighs:\n self.idxs = np.array([[]])\n else:\n self.idxs = np.array([[[]]])\n elif '__len__' not in dir(key[0][0]):\n self._set_neighs_list_list(key)\n else:\n self._set_neighs_list_list_list(key)", "def reveal_sort(k, D, reverse=False):\n assert len(k) == len(D)\n library.break_point()\n shuffle = types.sint.get_secure_shuffle(len(k))\n k_prime = k.get_vector().secure_permute(shuffle).reveal()\n idx = types.Array.create_from(k_prime)\n if reverse:\n D.assign_vector(D.get_slice_vector(idx))\n library.break_point()\n D.secure_permute(shuffle, reverse=True)\n else:\n D.secure_permute(shuffle)\n library.break_point()\n v = D.get_vector()\n D.assign_slice_vector(idx, v)\n library.break_point()\n instructions.delshuffle(shuffle)", "def build_k_indices(y, k_fold, seed=1):\n \n num_row = y.shape[0]\n interval = int(num_row / k_fold)\n np.random.seed(seed)\n indices = np.random.permutation(num_row)\n 
k_indices = [indices[k * interval: (k + 1) * interval]\n for k in range(k_fold)]\n return np.array(k_indices)" ]
[ "0.5962983", "0.57616395", "0.57209855", "0.56889814", "0.567445", "0.5651179", "0.5565496", "0.55339205", "0.5526475", "0.5519671", "0.55177283", "0.55137473", "0.54557353", "0.54334563", "0.5427422", "0.54150856", "0.5399065", "0.5382154", "0.53808355", "0.5375292", "0.5372239", "0.53716", "0.5364934", "0.53506434", "0.5349942", "0.534587", "0.5337015", "0.5327365", "0.5323884", "0.5304408", "0.5302724", "0.530168", "0.5298989", "0.52849585", "0.5254803", "0.52263486", "0.5200557", "0.51954037", "0.5192561", "0.51854175", "0.5178516", "0.5173208", "0.5159232", "0.515772", "0.5151501", "0.51448447", "0.5144783", "0.5137706", "0.5136221", "0.51324105", "0.51298565", "0.5121033", "0.5120584", "0.51205724", "0.51064616", "0.51055264", "0.5100825", "0.5096856", "0.50932616", "0.5083777", "0.5081947", "0.50818604", "0.5076988", "0.507396", "0.507396", "0.50729203", "0.5072912", "0.5067115", "0.5059932", "0.50532746", "0.50456005", "0.50414205", "0.5035698", "0.50321454", "0.5030159", "0.50266474", "0.50227016", "0.50227016", "0.50175524", "0.50175524", "0.50171506", "0.50147253", "0.5010397", "0.5006934", "0.50067705", "0.5004781", "0.5000077", "0.4995132", "0.49930984", "0.49929345", "0.49913174", "0.497341", "0.49638113", "0.49621493", "0.4958798", "0.49552995", "0.49498132", "0.49389133", "0.49378577", "0.49346954" ]
0.57878196
1
For a vector v=(vx,vy), return the vector collinear with v of norm n
def normal(vx,vy,n):
    if vx==0:
        if vy==0:
            return (0,0)
        else:
            return (0,n)
    elif vy==0:
        return (n,0)
    else:
        return (n/sqrt(1+(vy/vx)**2),n/sqrt(1+(vx/vy)**2))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collpi2(Te,nev,v):\n return vcrit(Te)/2./v**3*collnu(Te,nev)", "def collnud(Te,nev,v):\n return vcrit(Te)/2./v**3*collnu(Te,nev)", "def f_v(_a, _vs, _Ps, _Ps0): # _aはスカラ, _vsはベクトル, _Ps, _Ps0は3行2列の行列\n center_pos = _Ps[0]\n center_pos_0 = _Ps0[0]\n idx_iter = Index_iterator(1, 8)\n #中心点から各点へのベクトル\n x = []\n x0 = []\n for p in (_Ps):\n x.append(p - center_pos)\n for p in _Ps(_Ps0):\n x0.append(p - center_pos_0)\n\n x01 = (_Ps[1]-center_pos) \n x02 = (_Ps[2]-center_pos) \n x03 = (_Ps[3]-center_pos) \n x04 = (_Ps[4]-center_pos) \n x05 = (_Ps[5]-center_pos) \n x06 = (_Ps[6]-center_pos) \n x07 = (_Ps[7]-center_pos) \n x08 = (_Ps[8]-center_pos)\n print('p_id', center_pos, end='\\t')\n print('x01:', x01, end=\"\\t\")\n print('x03:', x03, end=\"\\t\")\n print('x05:', x05, end=\"\\t\")\n print('x07:', x07)\n x001 = (_Ps0[1]-_Ps0[0]) \n x002 = (_Ps0[2]-_Ps0[0]) \n x003 = (_Ps0[3]-_Ps0[0]) \n x004 = (_Ps0[4]-_Ps0[0]) \n x005 = (_Ps0[5]-_Ps0[0]) \n x006 = (_Ps0[6]-_Ps0[0]) \n x007 = (_Ps0[7]-_Ps0[0]) \n x008 = (_Ps0[8]-_Ps0[0]) \n \n #中心点周りの面の面積\n def calc_area(j,k,l):\n s = LA.norm(np.cross(x[j],x[k]))/2 \\\n + LA.norm(np.cross(x[k],x[l]))/2\n return s\n\n s = []\n s0 = []\n hen = [1,3,5,7]\n for i in range(4):\n j,k,l = [n for n in idx_iter.get_indexes(start_idx=hen[i], 3)]\n s[i] = calc_area(j,k,l)\n s0[i] = calc_area(j,k,l)\n\n # s0123 = LA.norm(np.cross(x[1],x[2]))/2\\\n # +LA.norm(np.cross(x[2],x[3]))/2\n # s4367 = LA.norm(np.cross(x[3],x[4]))/2\\\n # +LA.norm(np.cross(x[4],x[5]))/2\n # s4785 = LA.norm(np.cross(x[5],x[6]))/2\\\n # +LA.norm(np.cross(x[6],x[7]))/2\n # s4521 = LA.norm(np.cross(x[7],x[8]))/2\\\n # +LA.norm(np.cross(x[8],x[1]))/2\n # s04103 = LA.norm(np.cross(x0[1],x0[2]))/2\\\n # +LA.norm(np.cross(x0[2],x0[3]))/2\n # s04367 = LA.norm(np.cross(x0[3],x0[4]))/2\\\n # +LA.norm(np.cross(x0[4],x0[7]))/2\n # s04785 = LA.norm(np.cross(x0[7],x0[8]))/2\\\n # +LA.norm(np.cross(x0[8],x0[5]))/2\n # s04521 = LA.norm(np.cross(x0[5],x0[2]))/2\\\n # +LA.norm(np.cross(x0[2],x0[1]))/2\n \n #各方向への平均面積(ここだけ反時計回り順で設定してる)\n S_iminus = (s[1] + s[2]) / 2 #43方向\n S_Jminus = (s[1] + s[4]) / 2 #41方向\n S_iplus = (s[3] + s[4]) / 2 #45方向\n S_Jplus = (s[3] + s[2]) / 2 #47方向\n S_iminus0 = (s0[1] + s0[2]) / 2 #43方向\n S_Jminus0 = (s0[1] + s0[4]) / 2 #41方向\n S_iplus0 = (s0[3] + s0[4]) / 2 #45方向\n S_Jplus0 = (s0[3] + s0[2]) / 2 #47方向\n # 各方向への厚み\n h_iminus = h_0 / ((poisson/(1-poisson) * (S_iminus - S_iminus0) / S_iminus0) + 1) #43方向\n h_Jminus = h_0 / ((poisson/(1-poisson) * (S_Jminus - S_Jminus0) / S_Jminus0) + 1) #41方向\n h_iplus = h_0 / ((poisson/(1-poisson) * (S_iplus - S_iplus0) / S_iplus0) + 1) #45方向\n h_Jplus = h_0 / ((poisson/(1-poisson) * (S_Jplus - S_Jplus0) / S_Jplus0) + 1) #47方向\n # 各断片の重心\n g = []\n kado = [2,4,6,8]\n hen = [1,3,5,7]\n for i in range(len(kado)):\n _kado = kado[i]\n _hen1, _ = [idx for idx in idx_iter.get_indexes_reverse(_kado, 2)]\n _hen2, _ = [idx for idx in idx_iter.get_indexes(_kado, 2)]\n _hen = [_hen1, _hen2]\n _g1 = (center_pos + _Ps[_kado] + _Ps[_hen1])/3\n _g2 = (center_pos + _Ps[_kado] + _Ps[_hen2])/3\n g.append([_g1, _g2])\n\n g401 = (center_pos + _Ps[0] + _Ps[1]) / 3\n g430 = (center_pos + _Ps[3] + _Ps[0]) / 3\n g436 = (center_pos + _Ps[3] + _Ps[6]) / 3\n g467 = (center_pos + _Ps[6] + _Ps[7]) / 3\n g478 = (center_pos + _Ps[7] + _Ps[8]) / 3\n g485 = (center_pos + _Ps[8] + _Ps[5]) / 3\n g452 = (center_pos + _Ps[5] + _Ps[2]) / 3\n g421 = (center_pos + _Ps[2] + _Ps[1]) / 3\n g0401 = (_Ps0[4] + _Ps0[0] + _Ps0[1]) / 3\n g0430 = (_Ps0[4] + _Ps0[3] + _Ps0[0]) / 3\n g0436 = 
(_Ps0[4] + _Ps0[3] + _Ps0[6]) / 3\n g0467 = (_Ps0[4] + _Ps0[6] + _Ps0[7]) / 3\n g0478 = (_Ps0[4] + _Ps0[7] + _Ps0[8]) / 3\n g0485 = (_Ps0[4] + _Ps0[8] + _Ps0[5]) / 3\n g0452 = (_Ps0[4] + _Ps0[5] + _Ps0[2]) / 3\n g0421 = (_Ps0[4] + _Ps0[2] + _Ps0[1]) / 3\n \n # 各断片面積\n triangle_area = []\n kado = [2,4,6,8]\n for i in range(len(kado)):\n j, k = [idx for idx in idx_iter.get_indexes_reverse(kado[i], 1)]\n _s1 = LA.norm(np.cross(x[j],x[k]))/2\n j, k = [idx for idx in idx_iter.get_indexes(kado[i], 1)]\n _s2 = LA.norm(np.cross(x[j],x[k]))/2\n triangle_area.append([_s1, _s2])\n\n s410 = LA.norm(np.cross(x[1],x[2]))/2\n s403 = LA.norm(np.cross(x[2],x[3]))/2\n s436 = LA.norm(np.cross(x[3],x[4]))/2\n s467 = LA.norm(np.cross(x[4],x[5]))/2\n s478 = LA.norm(np.cross(x[5],x[6]))/2\n s485 = LA.norm(np.cross(x[6],x[7]))/2\n s452 = LA.norm(np.cross(x[7],x[8]))/2\n s421 = LA.norm(np.cross(x[8],x[1]))/2\n s0410 = LA.norm(np.cross(x0[1],x0[2]))/2\n s0403 = LA.norm(np.cross(x0[2],x0[3]))/2\n s0436 = LA.norm(np.cross(x0[3],x0[4]))/2\n s0467 = LA.norm(np.cross(x0[4],x0[5]))/2\n s0478 = LA.norm(np.cross(x0[5],x0[6]))/2\n s0485 = LA.norm(np.cross(x0[6],x0[7]))/2\n s0452 = LA.norm(np.cross(x0[7],x0[8]))/2\n s0421 = LA.norm(np.cross(x0[8],x0[1]))/2\n # 四角の重心\n\n center_g_square = []\n for i in range(len(g)):\n _g = (triangle_area[i][0]*g[i][0] + triangle_area[i][1]*g[i][1])/(triangle_area[i][0] + triangle_area[i][1])\n center_g.append(_g)\n g4103 = (s410*g401 + s403*g430) / (s410 + s403)\n g4367 = (s436*g436 + s467*g467) / (s436 + s467)\n g4785 = (s478*g478 + s485*g485) / (s478 + s485)\n g4521 = (s452*g452 + s421*g421) / (s452 + s421)\n g04103 = (s0410*g0401 + s0403*g0430) / (s0410 + s0403)\n g04367 = (s0436*g0436 + s0467*g0467) / (s0436 + s0467)\n g04785 = (s0478*g0478 + s0485*g0485) / (s0478 + s0485)\n g04521 = (s0452*g0452 + s0421*g0421) / (s0452 + s0421)\n # 各重心間の距離\n Lj82 = LA.norm(g4521 - g4103)\n Lj24 = LA.norm(g4103 - g4367)\n Lj46 = LA.norm(g4367 - g4785)\n Lj68 = LA.norm(g4785 - g4521)\n \n # ひずみ\n eps_i41 = (LA.norm(x01) - LA.norm(x041)) / LA.norm(x041)\n eps_J41 = (LA.norm(g4521 - g4103) - LA.norm(g04521 - g04103)) / LA.norm(g04521 - g04103)\n eps_i43 = (LA.norm(x03) - LA.norm(x043)) / LA.norm(x043)\n eps_J43 = (LA.norm(g4103 - g4367) - LA.norm(g04103 - g04367)) / LA.norm(g04103 - g04367)\n eps_i47 = (LA.norm(x01) - LA.norm(x041)) / LA.norm(x041)\n eps_J47 = (LA.norm(g4367 - g4785) - LA.norm(g04367 - g04785)) / LA.norm(g04367 - g04785)\n eps_i45 = (LA.norm(x01) - LA.norm(x041)) / LA.norm(x041)\n eps_J45 = (LA.norm(g4785 - g4521) - LA.norm(g04785 - g04521)) / LA.norm(g04785 - g04521)\n # 張力\n F_T1 = (young_modulus * h_Jminus * Lj82 * (eps_i41 + poisson * eps_J41) / (1 - poisson**2))*x01/LA.norm(x01)\n F_T3 = (young_modulus * h_iminus * Lj24 * (eps_i43 + poisson * eps_J43) / (1 - poisson**2))*x03/LA.norm(x03)\n F_T5 = (young_modulus * h_Jplus * Lj46 * (eps_i47 + poisson * eps_J47) / (1 - poisson**2))*x05/LA.norm(x05)\n F_T7 = (young_modulus * h_iplus * Lj68 * (eps_i45 + poisson * eps_J45) / (1 - poisson**2))*x07/LA.norm(x07)\n # せん断ひずみ\n gamma513 = (math.acos((np.dot(x07,x01))/(LA.norm(x07)*LA.norm(x01))) - math.acos((np.dot(x045,x041))/(LA.norm(x045)*LA.norm(x041)))\\\n + math.acos((np.dot(x03,x01))/(LA.norm(x03)*LA.norm(x01))) - math.acos((np.dot(x043,x041))/(LA.norm(x043)*LA.norm(x041))))/2\n gamma137 = (math.acos((np.dot(x01,x03))/(LA.norm(x01)*LA.norm(x03))) - math.acos((np.dot(x041,x043))/(LA.norm(x041)*LA.norm(x043)))\\\n + math.acos((np.dot(x03,x05))/(LA.norm(x03)*LA.norm(x05))) - 
math.acos((np.dot(x043,x047))/(LA.norm(x043)*LA.norm(x047))))/2\n gamma375 = (math.acos((np.dot(x05,x03))/(LA.norm(x05)*LA.norm(x03))) - math.acos((np.dot(x047,x043))/(LA.norm(x047)*LA.norm(x043)))\\\n + math.acos((np.dot(x07,x05))/(LA.norm(x07)*LA.norm(x05))) - math.acos((np.dot(x045,x047))/(LA.norm(x045)*LA.norm(x047))))/2\n gamma751 = (math.acos((np.dot(x05,x07))/(LA.norm(x05)*LA.norm(x07))) - math.acos((np.dot(x047,x045))/(LA.norm(x047)*LA.norm(x045)))\\\n + math.acos((np.dot(x07,x01))/(LA.norm(x07)*LA.norm(x01))) - math.acos((np.dot(x045,x041))/(LA.norm(x045)*LA.norm(x041))))/2\n # せん断力\n F_S41 = ((young_modulus * h_Jminus * LA.norm(x01) * gamma513)/(2 * (1 + poisson)))*x01/LA.norm(x01)\n F_S43 = ((young_modulus * h_Jminus * LA.norm(x03) * gamma137)/(2 * (1 + poisson)))*x03/LA.norm(x03)\n F_S47 = ((young_modulus * h_Jminus * LA.norm(x05) * gamma375)/(2 * (1 + poisson)))*x05/LA.norm(x05)\n F_S45 = ((young_modulus * h_Jminus * LA.norm(x07) * gamma751)/(2 * (1 + poisson)))*x07/LA.norm(x07)\n \n # J方向の曲げ力\n n_j_cross = np.cross(x05, x01)\n if any(n_j_cross):\n n_J = n_j_cross/LA.norm(n_j_cross)\n else: \n\n l_Jalfa = LA.norm(_Ps[1] - _Ps[7])\n cos_Jalfa = (LA.norm(x01)**2 + LA.norm(x05)**2 - l_Jalfa**2) / (2 * LA.norm(x01) * LA.norm(x05))\n if cos_Jalfa > 1.0:\n cos_Jalfa = 1.0\n elif cos_Jalfa < -1.0:\n cos_Jalfa = -1.0\n sin_Jalfa = math.sqrt(1 - cos_Jalfa**2)\n CJa2 = math.sqrt((cos_Jalfa + 1)/2)\n SJa2 = math.sqrt((1 - cos_Jalfa)/2)\n zJC = (_Ps[7][2]-_Ps[1][2])/(_Ps[7][0]-_Ps[1][0]) * (center_pos[0]-_Ps[1][0]) + _Ps[1][2] #曲げ力の方向の場合わけに必要\n if center_pos[2] > zJC:\n e_j = np.dot(np.array([[CJa2 + (n_J[0]**2) * (1 - CJa2), n_J[0] * n_J[1] * (1 - CJa2) + n_J[2] * SJa2, n_J[0] * n_J[2] * (1 - CJa2) - n_J[1] * SJa2],\\\n [n_J[1] * n_J[0] * (1 - CJa2) - n_J[2] * SJa2, CJa2 + (n_J[1]**2) * (1 - CJa2), n_J[1] * n_J[2] * (1 - CJa2) + n_J[0] * SJa2],\\\n [n_J[2] * n_J[0] * (1 - CJa2) + n_J[1] * SJa2, n_J[2] * n_J[1] * (1 - CJa2) - n_J[0] * SJa2, CJa2 + (n_J[2]**2) * (1 - CJa2)]]), (_Ps[7] - center_pos)/LA.norm(_Ps[7] - center_pos))\n else:\n e_j = np.dot(np.array([[CJa2 + (n_J[0]**2) * (1 - CJa2), n_J[0] * n_J[1] * (1 - CJa2) - n_J[2] * SJa2, n_J[0] * n_J[2] * (1 - CJa2) + n_J[1] * SJa2],\\\n [n_J[1] * n_J[0] * (1 - CJa2) + n_J[2] * SJa2, CJa2 + (n_J[1]**2) * (1 - CJa2), n_J[1] * n_J[2] * (1 - CJa2) - n_J[0] * SJa2],\\\n [n_J[2] * n_J[0] * (1 - CJa2) - n_J[1] * SJa2, n_J[2] * n_J[1] * (1 - CJa2) + n_J[0] * SJa2, CJa2 + (n_J[2]**2) * (1 - CJa2)]]), (_Ps[7] - center_pos)/LA.norm(_Ps[7] - center_pos))\n d_etha_J = (2 * sin_Jalfa / l_Jalfa) - (2 * math.sqrt(1 - np.dot(x041,x047)**2/(LA.norm(x041)*LA.norm(x047))**2)/(LA.norm(x041 - x047)))\n\n n_i = np.cross(x07,x03)/LA.norm(np.cross(x03,x07)) \n cos_ialfa = np.dot(x03,x07) / (LA.norm(x03) * LA.norm(x07))\n sin_ialfa = math.sqrt(1 - cos_ialfa**2)\n Cia2 = math.sqrt((cos_ialfa + 1)/2)\n Sia2 = math.sqrt((1 - cos_ialfa)/2)\n ziC = (_Ps[5][2]-_Ps[3][2])/(_Ps[5][0]-_Ps[3][0]) * (center_pos[0]-_Ps[3][0]) + _Ps[3][2]\n if center_pos[2] > ziC:\n e_i = np.dot(np.array([[Cia2 + (n_i[0]**2) * (1 - Cia2), n_i[0] * n_i[1] * (1 - Cia2) + n_i[2] * Sia2, n_i[0] * n_i[2] * (1 - Cia2) - n_i[1] * Sia2],\\\n [n_i[1] * n_i[0] * (1 - Cia2) - n_i[2] * Sia2, Cia2 + (n_i[1]**2) * (1 - Cia2), n_i[1] * n_i[2] * (1 - Cia2) + n_i[0] * Sia2],\\\n [n_i[2] * n_i[0] * (1 - Cia2) + n_i[1] * Sia2, n_i[2] * n_i[1] * (1 - Cia2) - n_i[0] * Sia2, Cia2 + (n_i[2]**2) * (1 - Cia2)]]), (_Ps[7] - center_pos)/LA.norm(_Ps[7] - center_pos))\n else:\n e_i = np.dot(np.array([[Cia2 + (n_i[0]**2) 
* (1 - Cia2), n_i[0] * n_i[1] * (1 - Cia2) - n_i[2] * Sia2, n_i[0] * n_i[2] * (1 - Cia2) + n_i[1] * Sia2],\\\n [n_i[1] * n_i[0] * (1 - Cia2) + n_i[2] * Sia2, Cia2 + (n_i[1]**2) * (1 - Cia2), n_i[1] * n_i[2] * (1 - Cia2) - n_i[0] * Sia2],\\\n [n_i[2] * n_i[0] * (1 - Cia2) - n_i[1] * Sia2, n_i[2] * n_i[1] * (1 - Cia2) + n_i[0] * Sia2, Cia2 + (n_i[2]**2) * (1 - Cia2)]]), (_Ps[5] - center_pos)/LA.norm(_Ps[5] - center_pos))\n d_etha_i = (2 * sin_ialfa / LA.norm(x07 - x03)) - (2 * math.sqrt(1 - np.dot(x043,x045)**2/(LA.norm(x043)*LA.norm(x045))**2)/(LA.norm(x043 - x045)))\n\n\n l_J = (Lj20 + Lj06 + Lj68 + Lj82) / 4\n h = (h_iminus + h_iplus + h_Jminus + h_Jplus) / 4\n I = (l_J * h**3) / 12\n M_i = (young_modulus * I * (d_etha_i + poisson * d_etha_J)/(1 - poisson**2))\n M_J = (young_modulus * I * (d_etha_J + poisson * d_etha_i)/(1 - poisson**2))\n #曲げ力\n F_Bi = M_i / LA.norm(x03) + M_i / LA.norm(x07) * e_i\n F_BJ = M_J / LA.norm(x01) + M_J / LA.norm(x05) * e_j\n #空気力\n # S = (S_iminus + S_iplus + S_Jminus + S_Jplus) / 4\n # F_A = p * S\n F_A = np.array([0.0, 0.0, -0.1]) * _a\n\n # 運動方程式(支配方程式)\n S_0 = (S_iminus0 + S_iplus0 + S_Jminus0 + S_Jplus0) / 4\n F_T = F_T41 + F_T43 + F_T45 + F_T47\n F_S = F_S41 + F_S43 + F_S45 + F_S47\n F_B = F_Bi + F_BJ\n return (F_T + F_S + F_B + F_A) / (rho * h_0 * S_0) - c * _vs", "def V_vect(self, points):\n return self.A_conf*norm(points)*self.isOutside(points)", "def vector_perp(v):\n assert len(v) == 2\n x, y = v\n return Vector(-y, x)", "def vel_inicial(x): #Velocidad inicial como un vector de ceros\r\n return np.zeros_like(x)", "def LD_Vx_Vy(self, x, y):\n\t\tself.V[x] = self.V[y]", "def edge_velocity(self):\n #reflext x values at x edges\n self.u[1,:,0] = -self.u[1,:,1]\n self.u[1,:,-1] = -self.u[1,:,-2]\n #mirror x values at y edges \n self.u[1,0,:] = self.u[1,1,:]\n self.u[1,-1,:] = self.u[1,-2,:]\n #mirror y values at x edges\n self.u[0,:,0] = self.u[0,:,1]\n self.u[0,:,-1] = self.u[0,:,-2]\n #mirror y values at y edges \n self.u[0,0,:] = -self.u[0,1,:]\n self.u[0,-1,:] = -self.u[0,-2,:]", "def coordinate_in_lattice(v,n,m):\n y=int(v/n);\n x=(v%n);\n\n return (x,y)", "def get_vertical_vector(q):\n P0, P1, P2, P3 = q\n P0_up = copy.deepcopy(P0)\n P0_up.depth = P0_up.depth - 1.0\n p0 = Vector.fromPoint(P0) # fromPoint converts to ECEF\n p1 = Vector.fromPoint(P0_up)\n v1 = (p1 - p0).norm()\n return v1", "def SNE_Vx_Vy(self, x, y):\n\t\tif self.V[x] != self.V[y]:\n\t\t\tself.IP += 2", "def uv(vec):\n return vec / sqrt(dot(vec, vec))", "def atualizaVertice(self, v = []):\r\n\r\n #reseta as arestas para d0\r\n #for a in self.arestas:\r\n # a.peso = a.d0\r\n\r\n for vertice in v:\r\n for a in self.arestas:\r\n if (vertice.id == a.v1.id):\r\n #print (\"atualiza aresta\", a.id)\r\n if (a.v2.atualizado):\r\n a.peso = a.d2\r\n else:\r\n a.peso = a.d1\r\n\r\n\r\n if (vertice.id == a.v2.id):\r\n #print (\"atualiza aresta\", a.id)\r\n if (a.v1.atualizado):\r\n a.peso = a.d2\r\n else:\r\n a.peso = a.d1\r\n \r\n vertice.atualizado = True\r\n \r\n for vertice in v:\r\n vertice.atualizado = False", "def SUBN_Vx_Vy(self, x, y):\n\t\tself.VF = 1 if self.V[y] > self.V[x] else 0\n\t\tself.V[x] = self.V[y] - self.V[x]\n\t\tif self.V[x] < 0:\n\t\t\tself.V[x] = self.V[x] + 256", "def uvmap(self, p):\n # bottom left corner of the plane\n p00 = self.position - (self.sx * self.n0) / 2 - (self.sy * self.n1) / 2\n dif_vector = p - p00\n u = np.dot(dif_vector, self.n0) / self.sx\n v = np.dot(dif_vector, self.n1) / self.sy\n return u, v", "def test_set_vx_to_vx_minus_vy(self, cpu):\n for x 
in range(0x0, 0xF):\n for y in range(0x0, 0xF):\n if x != y:\n cpu.opcode = 0x8005 | (x << 8) | (y << 4)\n for v1 in range(0x0, 0xFF):\n for v2 in range(0x0, 0xFF):\n cpu.V_register[x] = v1\n cpu.V_register[y] = v2\n cpu.set_vx_to_vx_minus_vy()\n value = v1 - v2\n if value > 0:\n assert(cpu.V_register[0xF] == 1)\n else:\n assert(cpu.V_register[0xF] == 0)\n if value >= 0:\n assert(cpu.V_register[x] == value)\n else:\n assert(cpu.V_register[x] == 0x100 + value)", "def vecteur_image(T,x,y):\n a,b,c,d = T[0][0],T[0][1],T[1][0],T[1][1]\n xx = a*x + b*y\n yy = c*x + d*y\n return xx,yy", "def test_set_vx_to_vy_minus_vx(self, cpu):\n for x in range(0x0, 0xF):\n for y in range(0x0, 0xF):\n if x != y:\n cpu.opcode = 0x8007 | (x << 8) | (y << 4)\n for v1 in range(0x0, 0xFF):\n for v2 in range(0x0, 0xFF):\n cpu.V_register[x] = v1\n cpu.V_register[y] = v2\n cpu.set_vx_to_vy_minus_vx()\n value = v2 - v1\n if value > 0:\n assert(cpu.V_register[0xF] == 1)\n else:\n assert(cpu.V_register[0xF] == 0)\n if value >= 0:\n assert(cpu.V_register[x] == value)\n else:\n assert(cpu.V_register[x] == 0x100 + value)", "def plane_point_side_v3(p: np.ndarray, v: np.ndarray) -> Any:\n return p[:3].dot(v) + p[3]", "def project_onto_plane(vect):\n x, y, z = vect\n \n return (x, y, 0.)", "def V(self, point = -1):\n return self.solution('V', point)", "def constrain(v2,w,h):\n if v2.x > w:\n v2.x = w\n if v2.x < 0:\n v2.x = 0 \n if v2.y > h:\n v2.y = h\n if v2.y < 0:\n v2.y = 0\n return v2", "def find_perpendicular_vector(vt):\n x, y = vt\n return np.array([y, -x])", "def vect_contract(m, c, n):\n a = np.tensordot(m, c, (0, 0))\n mn = np.tensordot(a, n, (2, 0))\n return mn", "def proyZm1(u, v, t1):\n den = u ** 2 + v ** 2 + 4\n x = u - t1 * (u - 4 * u / den)\n y = v - t1 * (v - 4 * v / den)\n z = -1 - t1 * (-2 + 8 / den)\n return (x, y, z)", "def corners((u,v)):\r\n return ((u+1,v+1), (u+1,v), (u,v), (u,v+1))", "def vecvel(x, SAMPLING, TYPE=2):\n\n dims = x.shape\n N = dims[0]\n v = np.zeros((N, dims[1]))\n # print(v.shape)\n\n if TYPE == 2:\n v[2:(N - 3)] = SAMPLING / 6 * (x[5:N].values + x[4:(N - 1)].values - x[2:(N - 3)].values - x[1:(\n N - 4)].values) # SAMPLING/6*(x[5:N,] + x[4:(N-1),] - x[2:(N-3),] - x[1:(N-4),])\n v[1] = SAMPLING / 2 * (x[3:4].values - x[1:2].values) # SAMPLING/2*(x[3,:] - x[1,:])\n v[(N - 1)] = SAMPLING / 2 * (\n x[N - 1:N].values - x[(N - 3):(N - 2)].values) # SAMPLING/2*(x[N,:] - x[(N-2),:])\n else:\n v[2:(N - 2)] = SAMPLING / 2 * (x[3:N - 1].values - x[1:(N - 3)].values) # SAMPLING/2*(x[3:N,:] - x[1:(N-2),:])\n\n return (v)", "def test_set_vx_to_vy(self, cpu):\n for x in range(0x0, 0xF):\n for y in range(0x0, 0xF):\n if x != y:\n cpu.opcode = 0x8000 | (x << 8) | (y << 4)\n for v in range(0x0, 0xFF):\n cpu.V_register[y] = v\n cpu.set_vx_to_vy()\n assert(cpu.V_register[x] == v)", "def v(self):\n return Vector2(self.position)", "def afficher_V(V, titre=\"Fonction valeur de la politique selon méthode de Monte Carlo première visite\"):\n # Déterminer les quadrillages des axes X et Y\n min_x = min(etat[0] for etat in V.keys()) # axe des x : main du joueur\n max_x = max(etat[0] for etat in V.keys())\n min_y = min(etat[1] for etat in V.keys()) # axe des y : main de la banque\n max_y = max(etat[1] for etat in V.keys())\n\n x_range = np.arange(min_x, max_x + 1)\n y_range = np.arange(min_y, max_y + 1)\n X, Y = np.meshgrid(x_range, y_range)\n\n # Rassembler les valeurs de z pour tous les (x, y) : distinguer les cas avec et sans as utilisable\n Z_sans_as = np.apply_along_axis(lambda _: V[(_[0], _[1], False)], 
2, np.dstack([X, Y]))\n Z_as = np.apply_along_axis(lambda _: V[(_[0], _[1], True)], 2, np.dstack([X, Y]))\n\n def afficher_surface(X, Y, Z, titre):\n fig = plt.figure(figsize=(20, 10))\n ax = fig.add_subplot(111, projection='3d')\n surface = ax.plot_surface(X, Y, Z, rstride=1, cstride=1,\n cmap=matplotlib.cm.Reds, vmin=-1.0, vmax=1.0)\n ax.set_xlabel('Total joueur')\n ax.set_ylabel('Carte visible banque')\n ax.set_zlabel('Valeur')\n ax.set_title(titre)\n ax.view_init(ax.elev, -140)\n fig.colorbar(surface)\n plt.show()\n\n afficher_surface(X, Y, Z_sans_as, \"{} (Sans as utilisable)\".format(titre))\n afficher_surface(X, Y, Z_as, \"{} (Avec as utilisable)\".format(titre))", "def vec_node(self):\r\n\r\n xv = np.arange(self.ox, self.lx + self.ox + self.dx, self.dx)\r\n yv = np.arange(self.oy, self.ly + self.oy + self.dy, self.dy)\r\n zv = np.arange(self.oz, self.lz + self.oz + self.dz, self.dz)\r\n\r\n return xv, yv, zv", "def test_set_vx_to_vx_xor_vy(self, cpu):\n for x in range(0x0, 0xF):\n for y in range(0x0, 0xF):\n if x != y:\n cpu.opcode = 0x8003 | (x << 8) | (y << 4)\n for v1 in range(0x0, 0xFF):\n for v2 in range(0x0, 0xFF):\n cpu.V_register[x] = v1\n cpu.V_register[y] = v2\n cpu.set_vx_to_vx_xor_vy()\n assert(cpu.V_register[x] == v1 ^ v2)", "def project_vector(u, v):\n u_np = np.array([u.get_x(), u.get_y()])\n v_np = np.array([v.get_x(), v.get_y()])\n proj = (np.dot(u_np, v_np) / np.dot(v_np, v_np)) * v_np\n return Point(proj[0], proj[1])", "def __init__(self,v0,v1):\n self.vinputs = v0,v1\n self.xhi = max([v0[0],v1[0]])\n self.yhi,self.ylo = v0[1]>v1[1] and (v0[1],v1[1],) or (v1[1],v0[1])\n\n self.m = (v1[0]-v0[0]) / (v1[1]-v0[1]) ### (x1-x0)/(y1-y0)\n self.b = v0[0] - (v0[1] * self.m) ### x0 - y0*(x1-x0)/(y1-y0)", "def project (u, v):\r\n\r\n # Construct linear system Ap = d\r\n A = sps.lil_matrix ((width*height, width*height))\r\n d = np.zeros ((width*height))\r\n\r\n for i in range (1, height-1):\r\n for j in range (1, width-1):\r\n A[index(i,j), index(i,j)] = 4\r\n A[index(i,j), index(i-1,j)] = -1\r\n A[index(i,j), index(i+1,j)] = -1\r\n A[index(i,j), index(i,j-1)] = -1\r\n A[index(i,j), index(i,j+1)] = -1\r\n \r\n d[index(i,j)] = -1/h * (u[i,j] - u[i,j-1] + v[i,j] - v[i-1,j])\r\n\r\n # Unhandled boundary cases, we assume solid walls that don't move\r\n A[index(0,0), index(0,0)] = 2\r\n A[index(0,0), index(1,0)] = -1\r\n A[index(0,0), index(0,1)] = -1\r\n d[index(0,0)] = -1/h * (u[0,0] + v[0,0])\r\n\r\n A[index(height-1,0), index(0,0)] = 2\r\n A[index(height-1,0), index(height-1,1)] = -1\r\n A[index(height-1,0), index(height-2,0)] = -1\r\n d[index(height-1,0)] = -1/h * (u[height-1,0] - v[height-2,0])\r\n\r\n A[index(0,width-1), index(0,width-1)] = 2\r\n A[index(0,width-1), index(1,width-1)] = -1\r\n A[index(0,width-1), index(0,width-2)] = -1\r\n d[index(0,width-1)] = -1/h * (-u[0,width-2] + v[0,width-1])\r\n\r\n A[index(height-1,width-1), index(height-1,width-1)] = 2\r\n A[index(height-1,width-1), index(height-2,width-1)] = -1\r\n A[index(height-1,width-1), index(height-1,width-2)] = -1\r\n d[index(height-1,width-1)] = -1/h * (-u[height-1,width-2] - v[height-2,width-1])\r\n\r\n\r\n for i in range (1, height-1):\r\n A[index(i,0), index(i,0)] = 3\r\n A[index(i,0), index(i-1,0)] = -1\r\n A[index(i,0), index(i+1,0)] = -1\r\n A[index(i,0), index(i,1)] = -1\r\n d[index(i,0)] = -1/h * (u[i,0] + v[i,0] - v[i-1,0])\r\n\r\n for i in range (1, height-1):\r\n A[index(i,width-1), index(i,width-1)] = 3\r\n A[index(i,width-1), index(i-1,width-1)] = -1\r\n A[index(i,width-1), index(i+1,width-1)] 
= -1\r\n A[index(i,width-1), index(i,width-2)] = -1\r\n d[index(i,width-1)] = -1/h * (- u[i,width-2] + v[i, width-1] - v[i-1,width-1])\r\n\r\n for j in range (1, width-1):\r\n A[index(0,j), index(0,j)] = 3\r\n A[index(0,j), index(1,j)] = -1\r\n A[index(0,j), index(0,j-1)] = -1\r\n A[index(0,j), index(0,j+1)] = -1\r\n d[index(0,j)] = -1/h * (u[0,j] - u[0,j-1] + v[0,j])\r\n \r\n for j in range (1, width-1):\r\n A[index(height-1,j), index(height-1,j)] = 3\r\n A[index(height-1,j), index(height-2,j)] = -1\r\n A[index(height-1,j), index(height-1,j-1)] = -1\r\n A[index(height-1,j), index(height-1,j+1)] = -1\r\n d[index(height-1,j)] = -1/h * (u[height-1,j] - u[height-1,j-1] - v[height-2,j])\r\n\r\n\r\n A = A * dt / (density * h**2)\r\n\r\n A = sps.csr_matrix (A)\r\n p = np.reshape(spsolve (A, d), (height, width))\r\n\r\n # Calculate new velocity field based on this pressure field\r\n for i in range (height):\r\n for j in range (width):\r\n if (i == height-1 and j == width-1) or (i == height-1 and j == 0) or (i == 0 and j == width-1) or (i == 0 and j == 0):\r\n # Set vertical velocity to movement of solid wall 0\r\n u[i,j] = 0\r\n v[i,j] = 0\r\n elif i == height-1 or i == 0:\r\n u[i,j] = u[i,j] - dt / (density * h) * (p[i,j+1] - p[i,j])\r\n v[i,j] = 0\r\n elif j == width-1 or j == 0:\r\n u[i,j] = 0\r\n v[i,j] = v[i,j] - dt / (density * h) * (p[i+1,j] - p[i,j])\r\n else:\r\n u[i,j] = u[i,j] - dt / (density * h) * (p[i,j+1] - p[i,j])\r\n v[i,j] = v[i,j] - dt / (density * h) * (p[i+1,j] - p[i,j])\r\n\r\n # let's get some inflow\r\n u[4:12, 0] = 1\r\n\r\n return u, v, p", "def __rmul__(self,nb):\n\t\treturn Vect2D(nb*self._vec)", "def getVerticePosition(self):\n #def getvoxelpos(model,scale,dims,translate,i,j,k): #centroid!\n return(self.X,self.Y,self.Z)", "def compute_normalvect(self):\n normvect = np.zeros((len(self.tri_pnts),3,3))\n zvec = np.array([0, 0, 1])\n for itri, tri in enumerate(self.tri_pnts):\n #import pdb; pdb.set_trace()\n tri0, tri1, tri2 = tri\n x1,y1 = self.points[tri1]-self.points[tri0]\n v1 = np.array([x1,y1,0])\n x2,y2 = self.points[tri2]-self.points[tri1]\n v2 = np.array([x2,y2,0])\n x3,y3 = self.points[tri0]-self.points[tri2]\n v3 = np.array([x3,y3,0])\n v1 = v1/np.linalg.norm(v1)\n v2 = v2/np.linalg.norm(v2)\n v3 = v3/np.linalg.norm(v3)\n #import pdb; pdb.set_trace()\n normvect[itri,:,:] = np.cross(v1,zvec), np.cross(v2,zvec), np.cross(v3,zvec)\n #import pdb; pdb.set_trace()\n return normvect", "def V_vect(self, distances):\n distances_norm2 = norm2(distances)\n distances_norm = np.sqrt(distances_norm2)\n isColliding = self.isColliding(distances_norm)\n\n # Collision term proportional to d**2 (cutoff)\n v_colliding = -distances_norm2/self.d_coll**2 + 1.5+0.5 * \\\n (self.d_attr/self.d_coll)**(2*self.n) - (self.d_attr/self.d_coll)**self.n\n v_colliding *= isColliding\n\n # Interaction potential: d - ln d\n v_interact = 0.5*self.d_attr**(2*self.n)/(np.identity(np.shape(distances_norm2)[1])[None, :, :]+distances_norm2)**self.n - self.d_attr**self.n/(\n np.identity(np.shape(distances_norm2)[1])[None, :, :]+distances_norm2)**(self.n/2) + 0.5\n v_interact *= (1 - isColliding)\n\n v = v_colliding + v_interact\n\n # A particle does not interact with itself\n for i in range(len(v)):\n np.fill_diagonal(v[i], 0)\n return v", "def uv_at_xy(self, x, y, x0, y0, s0):\n dx, dy = self.distance(x0, y0, x, y)\n #print 'dx, dy:', dx, dy\n rr2 = (dx**2 + dy**2)**-1\n u = - s0 * dy * r_twopi * rr2\n v = s0 * dx * r_twopi * rr2\n #print 'u, v', u, v\n return u, v", "def __imul__(self, n):\n vectors = 
[n * Vector(*(p - self.center)) for p in self.points]\n self.points = [vectors[i](self.points[i]) for i in range(len(self.points))]\n return self", "def vector_equal(v1,v2):\n if (v2.x - 0.001 <= v1.x <= v2.x + 0.001) and \\\n (v2.y - 0.001 <= v1.y <= v2.y + 0.001) and \\\n (v2.z - 0.001 <= v1.z <= v2.z + 0.001):\n return True", "def OR_Vx_Vy(self, x, y):\n\t\tself.V[x] |= self.V[y]", "def get_perpendicular(n: np.ndarray) -> np.ndarray:\n # find smallest component\n i = np.argmin(n)\n\n # get the other two indices\n a = (i + 1) % 3\n b = (i + 2) % 3\n\n result = np.zeros(3)\n result[i] = 0.0\n result[a] = n[b]\n result[b] = -n[a]\n return result", "def vspatial(X, Y, U, V):\n row, col = X.shape\n r = row\n c = col\n vsqrt = (U ** 2 + V ** 2) ** 0.5\n Ax = U / vsqrt\n Ay = V / vsqrt\n CA = np.ones((r, c))\n CV = np.ones((r, c))\n for xin in range(0, c):\n for yin in range(0, r):\n CA[yin, xin] = (Ax[0:row-yin, 0:col-xin] * Ax[yin:row, xin:col] + Ay[0:row-yin, 0:col-xin] * Ay[yin:row, xin:col]).mean()\n CV[yin, xin] = (U[0:row-yin, 0:col-xin] * U[yin:row, xin:col] + V[0:row-yin, 0:col-xin] * V[yin:row, xin:col]).mean()\n return X, Y, CA, CV", "def test_set_vx_to_vx_or_vy(self, cpu):\n for x in range(0x0, 0xF):\n for y in range(0x0, 0xF):\n if x != y:\n cpu.opcode = 0x8003 | (x << 8) | (y << 4)\n for v1 in range(0x0, 0xFF):\n for v2 in range(0x0, 0xFF):\n cpu.V_register[x] = v1\n cpu.V_register[y] = v2\n cpu.set_vx_to_vx_or_vy()\n assert(cpu.V_register[x] == v1 | v2)", "def v(i, j, d):\n return 81 * (i - 1) + 9 * (j - 1) + d", "def set_vx_to_vx_or_vy(self, cpu):\n for x in range(0x0, 0xF):\n for y in range(0x0, 0xF):\n if x != y:\n cpu.opcode = 0x8001 | (x << 8) | (y << 4)\n for v1 in range(0x0, 0xFF):\n for v2 in range(0x0, 0xFF):\n cpu.V_register[x] = v1\n cpu.V_register[y] = v2\n cpu.set_vx_to_vx_or_vy()\n assert(cpu.V_register[x] == v1 | v2)", "def uVectNorm(x1,y1,z1, # P\n x2,y2,z2, # Q\n x3,y3,z3): # R\n p1 = np.array([x1,y1,z1])\n p2 = np.array([x2,y2,z2])\n p3 = np.array([x3,y3,z3])\n\n v1 = p3-p1\n v2 = p2-p1\n\n cp = np.cross(v1,v2)\n a,b,c = cp\n\n d = np.dot(cp, p3)\n\n print(a,b,c)", "def vector_space(a, alpha):\n x, y = meshgrid(linspace(-2, 2, num=20), linspace(-2, 2, num=20))\n fx, fy = stuartLandau([x, y], a, alpha)\n gx, gy = noiseFunction([x, y])\n plt.quiver(x, y, fx + gx, fy + gy, color='red')\n plt.xlabel('x')\n plt.ylabel('y')\n plt.show()", "def test_set_vx_to_vx_plus_vy(self, cpu):\n for x in range(0x0, 0xF):\n for y in range(0x0, 0xF):\n if x != y:\n cpu.opcode = 0x8004 | (x << 8) | (y << 4)\n for v1 in range(0x0, 0xFF):\n for v2 in range(0x0, 0xFF):\n cpu.V_register[x] = v1\n cpu.V_register[y] = v2\n cpu.set_vx_to_vx_plus_vy()\n value = v1 + v2\n if value > 0xFF:\n assert(cpu.V_register[0xF] == 1)\n assert(cpu.V_register[x] == value & 0xFF)\n else:\n assert(cpu.V_register[0xF] == 0)\n assert(cpu.V_register[x] == value)", "def vec(self):\r\n\r\n xv = np.arange(self.dx / 2, self.lx, self.dx)\r\n yv = np.arange(-self.ly / 2 + self.dy / 2, self.ly / 2, self.dy)\r\n zv = np.arange(self.oz, self.lz + self.oz, self.dz)\r\n\r\n if self.ox != 0:\r\n xv = np.arange(self.ox, self.lx + self.ox, self.dx)\r\n yv = np.arange(self.oy, self.ly + self.oy, self.dy)\r\n zv = np.arange(self.oz, self.lz + self.oz, self.dz)\r\n\r\n return xv, yv, zv", "def advect (u, v):\r\n # NOTICE: memory usage might be too high, could optimize\r\n\r\n # Store the values from timestep n\r\n un = u\r\n vn = v\r\n\r\n for i in range (height):\r\n for j in range (width):\r\n oldpos = coord (i,j) - dt * 
np.stack((u[i,j], v[i,j]))\r\n u[i,j], v[i,j] = interpolate (un, vn, oldpos)\r\n\r\n\r\n # Return values for timestep n+1\r\n return u, v", "def test_set_vx_to_vx_and_vy(self, cpu):\n for x in range(0x0, 0xF):\n for y in range(0x0, 0xF):\n if x != y:\n cpu.opcode = 0x8002 | (x << 8) | (y << 4)\n for v1 in range(0x0, 0xFF):\n for v2 in range(0x0, 0xFF):\n cpu.V_register[x] = v1\n cpu.V_register[y] = v2\n cpu.set_vx_to_vx_and_vy()\n assert(cpu.V_register[x] == v1 & v2)", "def uvmap(self, p):\n # local_v is the unit vector that goes in the direction from the center\n # of the sphere to the position p\n local_v = (p - self.position) / self.radius\n n0, n1, n2 = self.get_orientation()\n x = np.dot(n0, local_v)\n y = np.dot(n1, local_v)\n z = np.dot(n2, local_v)\n # phi = np.arccos(z)\n # v = phi / np.pi\n # theta = np.arccos((y / np.sin(phi)).round(4))\n # if x < 0:\n # theta = 2 * np.pi - theta\n # u = theta / (2 * np.pi)\n u = 0.5 + np.arctan2(z, x) / (2 * np.pi)\n v = 0.5 - np.arcsin(y) / np.pi\n v = 1 - v\n return u, v", "def drawVectorField(x0list,y0list,x1list,y1list,vtype='normal'):\n ivec = vectordict[vtype]\n dislin.field(x0list,y0list,x1list,y1list,len(x0list),ivec)", "def __invert__(self):\n \n return Vector(-self.y, self.x)", "def nor_vector(p1: Vec2, p2: Vec2) -> Vec2:\n return Vec2(p1.y - p2.y, p2.x - p1.x)", "def vect_creator(point_a, point_b):\n vect = np.subtract(point_a, point_b)\n return vect", "def test_velocity_boundaries(self):\n L_x = self.x_edge[-1]\n np.testing.assert_array_almost_equal(self.v_box(self.t, 0), 0, decimal=4)\n np.testing.assert_array_almost_equal(self.v_box(self.t, L_x), 0, decimal=4)", "def inverse(self):\n def inv(v):\n v[0], v[1] = v[1] , v[0]\n for v in [self.point1 , self.pointN , self.unitv, self.normalv]:\n inv(v)\n\n self.points = numpy.roll(self.points,1,axis=1)\n self.a, self.b = self.b, self.a\n self.angle = numpy.arccos( self.unitv[0] )*numpy.sign(self.unitv[1] )\n return", "def proyZ1(u, v, t2):\n den = u ** 2 + v ** 2 + 4\n x = u - t2 * (u - 4 * u / den)\n y = v - t2 * (v - 4 * v / den)\n z = 1 - t2 * (2 - 8 / den)\n return (x, y, z)", "def keypoints_vflip(keypoints, rows, cols):\n keypoints[:, 1] = (rows - 1) - keypoints[:, 1]\n return keypoints", "def XOR_Vx_Vy(self, x, y):\n\t\tself.V[x] ^= self.V[y]", "def cross_z(self):\n return Vector((self.v.y, -self.v.x))", "def is_linearly_independent_2x2(u, v):\n uv = get_uv(u, v)\n if uv[0][0] * uv[1][1] - uv[1][0] * uv[0][1] != 0:\n return True\n else:\n return False", "def generador_v(vector_n, constante):\n\n v = []\n\n for x in range(len(vector_n)):\n nv = vector_n[x] // constante # // = Division entera\n v.append(nv)\n\n # print(\"valores n: \", vector_n)\n # print(\"valores v: \", v)\n\n return v", "def np_vparam_2_vplane(vparam):\n d = np.linalg.norm(vparam, ord=2, axis=-1, keepdims=True)\n a = vparam[..., [0]] / d\n b = vparam[..., [1]] / d\n neg_sign = (a < 0)\n a[neg_sign] = -a[neg_sign]\n b[neg_sign] = -b[neg_sign]\n c = -(a * vparam[..., [0]] + b * vparam[..., [1]])\n vplane = np.concatenate([a, b, c], axis=-1)\n vplane[np.isnan(vplane)] = 0\n return vplane", "def p2d(V,x,y):\n def s(a,N):\n \"\"\"Shortcut function to convert array x into a coluumn vector.\"\"\"\n a=np.reshape(a,(1,N**2),order='F').T\n return a\n N=V.shape[1]\n con=np.ones((x.shape[0],x.shape[1])) # constant terms\n xx,yy,xy=x*x,y*y,x*y\n xxx,yyy,xxy,xyy=xx*x,yy*y,xx*y,x*yy\n xxxx,yyyy,xxxy,xxyy,xyyy=xx*xx,yy*yy,xxx*y,xx*yy,x*yyy\n V2=s(V,N) \n lst=[yyyy,xxxy,xxyy,xyyy,xxx,yyy,xxy,xyy,xx,yy,xy,x,y,con]\n Q=s(xxxx,N)\n 
count = 0\n for elem in lst:\n elem=s(elem,N)\n count+=1\n Q=np.hstack((Q,elem))\n c=np.linalg.lstsq(Q,V2) \n c=c[0]\n theta=-0.5*np.arctan(c[11]/(c[10]-c[9]))\n Af=0.5*(c[9]*(1+1./np.cos(2*theta))+c[10]*(1-1./np.cos(2*theta)))\n Bf=0.5*(c[9]*(1-1./np.cos(2*theta))+c[10]*(1+1./np.cos(2*theta)))\n theta=180.*theta/np.pi\n return (Af, Bf, theta)", "def find_position(self, xv, yv):\n # Convert position in spheric coord\n phi = xv*self.FOV_img/360/self.img_res\n theta = yv*self.FOV_img_Y/180/self.img_res_Y\n phi2 = phi+(360-self.FOV_img)/2\n theta2 = theta+(180-self.FOV_img_Y)/2\n\n u, v, w = spheric2cart(np.radians(theta2), np.radians(phi2)) # give cartesian coord of pixel\n\n # ignore errors due to /0 -> inf, -inf\n # divide (w/v) and invalid arctan2()\n with np.errstate(all='ignore'): # OPTIMIZE: see comment about pi = -pi and don't matter if -0 or 0 -> just replace by pi\n beta = -np.arctan(w/v)\n# beta2 = -np.arctan2(w, v)\n\n# v2 = np.dot(rotation_matrix(beta), [u, v, w]) # take 3*3 created matrix and aplly to vector\n matrix = rotation_matrix(beta)\n u2 = matrix[0, 0]*u\n v2 = matrix[1, 1]*v+matrix[1, 2]*w\n w2 = matrix[2, 1]*v+matrix[2, 2]*w\n _, seen_angle = cart2spheric(u2, v2, w2) # return phi in equator \"projection\"\n\n seen_angle = np.degrees(seen_angle)\n seen_angle = np.mod(seen_angle, 360) # define phi [0, 360]\n\n# seen_angle[seen_angle > 360] -= 360\n deviated_angle = np.zeros(seen_angle.shape)\n deviated_angle[seen_angle < 180] = self.interpolation(seen_angle[seen_angle < 180])\n deviated_angle[seen_angle >= 180] = 360 - self.interpolation(360-seen_angle[seen_angle >= 180])\n# np.flip(deviated_angle, 1) \" mais probleme overlap entre left et right\n\n theta = pi/2# *np.ones(deviated_angle.shape)\n phi = np.radians(deviated_angle)\n u3, v3, w3 = spheric2cart(theta, phi) #get cart coord of deviated pixel\n\n matrix = rotation_matrix(-beta)\n u4 = matrix[0, 0]*u3\n v4 = matrix[1, 1]*v3+matrix[1, 2]*w3\n w4 = matrix[2, 1]*v3+matrix[2, 2]*w3\n\n theta, phi = cart2spheric(u4, v4, w4) #give spheric coord of deviated pixel\n\n theta, phi = np.degrees(theta), np.degrees(phi)\n\n phi -= (360-self.FOV_img)/2\n theta -= (180-self.FOV_img_Y)/2\n\n with np.errstate(all='ignore'): # OPTIMIZE\n phi = np.mod(phi, 360) # define phi [0, 360]\n theta = np.mod(theta, 180) # define phi [0, 360]\n\n phi[phi == 360] = 0\n xv2 = phi*360/self.FOV_img*self.img_res\n yv2 = theta*180/self.FOV_img_Y*self.img_res_Y #give deviated angle pixel position\n\n xv2[np.isnan(xv2)] = -1\n yv2[np.isnan(yv2)] = -1\n\n xv2 = np.array(xv2, dtype=int)\n yv2 = np.array(yv2, dtype=int)\n\n return xv2, yv2", "def normalizeVert(v, radius):\n # calculate current distance\n dist = (v.co.x ** 2 + v.co.y ** 2 + v.co.z ** 2) ** 0.5\n\n # normalize\n for axis in range(3):\n v.co[axis] = v.co[axis] / dist * radius", "def boxstuff(pts,vec):\n\treturn pts-(pts>vec)*vec+(pts<np.array([0.,0.,0.]))*vec", "def draw_g_vector_field():\n x_matrix = np.arange(-10, 10, 1)\n y_matrix = np.arange(-10, 10, 1)\n u_matrix, v_matrix = np.meshgrid(x_matrix, y_matrix)\n fig, ax = plt.subplots()\n new_u_matrix = u_matrix.copy()\n new_v_matrix = v_matrix.copy()\n for i in range(len(u_matrix)):\n for j in range(len(u_matrix[0])):\n new_u_matrix[i][j] = 4 * u_matrix[i][j] + v_matrix[i][j] - 1\n new_v_matrix[i][j] = u_matrix[i][j] + 8 * v_matrix[i][j] + 1\n q = ax.quiver(x_matrix, y_matrix, new_u_matrix, new_v_matrix)\n ax.quiverkey(q, X=0.3, Y=1.1, U=10,\n label='Quiver key, length = 10', labelpos='E')\n return", "def 
compute_green_function(self,n):\n size = np.arange(n)\n xx,yy = np.meshgrid(size,size)\n vectors = np.array([xx.ravel(),yy.ravel()])\n norm = norm_on_grid(vectors)\n green = green_function(norm,self.grid,self.softner,self.G)\n try:\n green[n//2:, :n//2] = np.flip(green[:n//2, :n//2],axis=0)\n green[:n//2, n//2:] = np.flip(green[:n//2, :n//2],axis=1)\n green[n//2:, n//2:] = np.flip(green[:n//2, :n//2])\n except:\n green[n//2:, :n//2+1] = np.flip(green[:n//2+1, :n//2+1],axis=0)\n green[:n//2+1, n//2:] = np.flip(green[:n//2+1, :n//2+1],axis=1)\n green[n//2:, n//2:] = np.flip(green[:n//2+1, :n//2+1])\n self.green = green", "def dexpinv(self, u, v, _=None):\n A, a = np.split(u, 2)\n B, b = np.split(v, 2)\n alpha = np.linalg.norm(A)\n rho = np.inner(A, a)\n if np.isclose(alpha, 0):\n return v\n c1 = (\n B\n - 0.5 * np.cross(A, B)\n + self._dexpinv_helper_1(alpha) * np.cross(A, np.cross(A, B))\n )\n c2 = (\n b\n - 0.5 * (np.cross(a, B) + np.cross(A, b))\n + self._dexpinv_helper_2(alpha, rho) * np.cross(A, np.cross(A, B))\n + self._dexpinv_helper_1(alpha)\n * (\n np.cross(a, np.cross(A, B))\n + np.cross(A, np.cross(a, B))\n + np.cross(A, np.cross(A, b))\n )\n )\n return np.hstack((c1, c2))", "def test_vertical_velocity(self):\n L_x = self.x_edge[-1]\n np.testing.assert_array_equal(self.dVbox_dz(self.t, 0), 0)\n np.testing.assert_array_less(self.dVbox_dz(self.t, 0.5 * L_x), 0)\n np.testing.assert_array_equal(self.dVbox_dz(self.t, L_x), 0)", "def drawVector(x0,y0,x1,y1,vtype='normal fill', ucoords=1):\n ivec = vectordict[vtype]\n if ucoords:\n dislin.rlvec(x0,y0,x1,y1,ivec)\n else:\n dislin.vector(x0,y0,x1,y1,ivec)", "def __init__(self, vec2d):\n self.vec2d, self.i, self.j = vec2d, -1, 0", "def pv(self, other):\n\n assert self.n == other.n == 3, \"Produto vetorial definido somente em R3\"\n\n u, v = self, other\n\n return Vetor([u[1] * v[2] - u[2] * v[1],\n u[2] * v[0] - u[0] * v[2],\n u[0] * v[1] - u[1] * v[0]])", "def vector_component(u, v):\n x = dot_vectors(u, v) / length_vector_sqrd(v)\n return scale_vector(v, x)", "def l2(u: np.ndarray, v: np.ndarray) -> np.ndarray:\n\n return (u - v) ** 2", "def __neg__(self):\r\n return vec4(-self.x, -self.y, -self.z, -self.w)", "def vector_line(self):\n assert len(self.xcoords) == 2\n diff_x = self.xcoords[1] - self.xcoords[0]\n diff_z = self.zcoords[1] - self.zcoords[0]\n vec = np.hstack((diff_x, diff_z))\n return vec", "def npxywha2vertex(box):\n batch = box.shape[0]\n\n center = box[:,:2]\n w = box[:,2]\n h = box[:,3]\n rad = box[:,4]\n\n # calculate two vector\n verti = np.empty((batch,2), dtype=np.float32)\n verti[:,0] = (h/2) * np.sin(rad)\n verti[:,1] = - (h/2) * np.cos(rad)\n\n hori = np.empty((batch,2), dtype=np.float32)\n hori[:,0] = (w/2) * np.cos(rad)\n hori[:,1] = (w/2) * np.sin(rad)\n\n tl = center + verti - hori\n tr = center + verti + hori\n br = center - verti + hori\n bl = center - verti - hori\n\n return np.concatenate([tl,tr,br,bl], axis=1)", "def npxywha2vertex(box):\n batch = box.shape[0]\n\n center = box[:,:2]\n w = box[:,2]\n h = box[:,3]\n rad = box[:,4]\n\n # calculate two vector\n verti = np.empty((batch,2), dtype=np.float32)\n verti[:,0] = (h/2) * np.sin(rad)\n verti[:,1] = - (h/2) * np.cos(rad)\n\n hori = np.empty((batch,2), dtype=np.float32)\n hori[:,0] = (w/2) * np.cos(rad)\n hori[:,1] = (w/2) * np.sin(rad)\n\n tl = center + verti - hori\n tr = center + verti + hori\n br = center - verti + hori\n bl = center - verti - hori\n\n return np.concatenate([tl,tr,br,bl], axis=1)", "def 
test_inverse_of_linear_vector_transforms(free_alg: Drudge):\n\n dr = free_alg\n p = dr.names\n v = p.v\n\n a = Vec('a')\n b = Vec('b')\n\n defs = [\n dr.define(a, v + 1),\n dr.define(b, v - 1)\n ]\n res = dr.lvt_inv(defs)\n\n assert len(res) == 2\n half = Rational(1, 2)\n one_checked = False\n v_checked = False\n for i in res:\n if i.lhs == 1:\n assert (i - half * a + half * b).simplify() == 0\n one_checked = True\n elif i.lhs == v:\n assert (i - half * a - half * b).simplify() == 0\n v_checked = True\n else:\n assert False\n continue\n\n assert one_checked and v_checked", "def ncross2(u, v):\n return sq2(u) * sq2(v) - dot2(u, v) ** 2", "def get_inverse_2x2(u, v):\n if not is_linearly_independent_2x2(u, v):\n return\n uv = get_uv(u, v)\n iden = get_uv([1, 0],[0, 1])\n a = np.zeros((2, 4))\n for i in range(2):\n for j in range(2):\n a[i][j] = uv[i][j]\n a[i][j+2] = iden[i][j]\n\n q = a[0][1] / a[1][1]\n a[0] = a[0] - q * a[1]\n\n q = a[1][0] / a[0][0]\n a[1] = a[1] - q * a[0]\n\n a[0] /= a[0][0]\n\n a[1] /= a[1][1]\n\n for i in range(2):\n for j in range(2):\n uv[i][j] = a[i][j+2]\n return uv", "def stabilizer_vector(v, g, n):\n vg = v.copy()\n w = v.copy()\n for i in range(1, n):\n vg *= g \n w += vg\n assert v == vg * g\n if (w['B'] == 0).all():\n return None\n return w", "def __xor__(self,v2):\n\t\treturn np.cross(self._vec,v2._vec)", "def vector(self,\n i: int,\n j: int) -> np.ndarray:\n return self[j].coord - self[i].coord", "def non_rotated_vertices(self):\n v0 = [self.pos.x - self.width / 2, self.pos.y - self.height / 2]\n v1 = [self.pos.x + self.width / 2, self.pos.y - self.height / 2]\n v2 = [self.pos.x + self.width / 2, self.pos.y + self.height / 2]\n v3 = [self.pos.x - self.width / 2, self.pos.y + self.height / 2]\n return v0, v1, v2, v3", "def perpendicular_vector(v):\n if v[1] == 0 and v[2] == 0:\n if v[0] == 0:\n raise ValueError(\"zero vector\")\n else:\n return np.cross(v, [0, 1, 0])\n return np.cross(v, [1, 0, 0])", "def vinet(p, v):\n x = ( v / p[3] ) ** ( 1.0 / 3 )\n xi = 3.0 / 2 * ( p[2] - 1 )\n return p[0] + 9 * p[1] * p[3] / ( xi**2 ) * ( 1 + ( xi * ( 1 - x ) - 1 ) * np.exp( xi * ( 1 - x ) ) )", "def get_uv(u, v):\n uv = np.zeros((2, 2))\n uv[0][0] = u[0]\n uv[1][0] = u[1]\n uv[0][1] = v[0]\n uv[1][1] = v[1]\n return uv", "def getVec(pos1, pos2):\n\n x1 = pos2[0] - pos1[0]\n y1 = pos2[1] - pos1[1]\n gcd1 = math.gcd(abs(x1), abs(y1))\n\n if gcd1 > 0:\n x = x1//gcd1\n else:\n x = x1\n if gcd1 > 0:\n y = y1//gcd1\n else:\n y = y1\n\n return x, y", "def touches((u,v)):\r\n return ((u,v), (u,v-1), (u-1,v-1), (u-1,v))", "def neighbours((u,v)):\r\n return ((u,v+1), (u+1,v), (u,v-1), (u-1,v))", "def vxvyvz(ra, dec, l, b, mura, mudec, vrad, dist, parallax=False, vlsr=220, vsun=(-11.1, 12.24, 7.25), zsun=0, rsun=8,\n\t\t emura=None, emudec=None, evrad=None, edist=None, MCerror=False):\n\n\tif parallax:\n\t\tdist_new = ut.parallax_to_distance(dist)\n\t\tif edist is not None: edist_new = edist * dist * dist\n\telse:\n\t\tdist_new = dist\n\t\tedist_new = edist\n\n\tmul, mub = co.pmrapmdec_to_pmllpmbb(mura, mudec, ra, dec, degree=True).T\n\n\txs, ys, zs, vxs, vys, vzs = co.sphergal_to_rectgal(l, b, dist_new, vrad, mul, mub, degree=True).T\n\n\tRs = np.sqrt(xs * xs + ys * ys)\n\n\tvsun = np.array([0., vlsr, 0., ]) + np.array(vsun)\n\t# vsun=(0,0,0)\n\n\tR, phi, Z = co.XYZ_to_galcencyl(xs, ys, zs, Zsun=zsun, Xsun=rsun).T\n\n\tif edist is not None:\n\t\teR = np.abs(edist_new * np.cos(b * np.pi / 180))\n\t\teZ = np.abs(edist_new * np.sin(b * np.pi / 180))\n\telse:\n\t\teR = 
np.nan\n\t\teZ = np.nan\n\n\tvR, vT, vZ = co.vxvyvz_to_galcencyl(vxs, vys, vzs, R, phi, Z, vsun=vsun, Xsun=rsun, Zsun=zsun,\n\t\t\t\t\t\t\t\t\t\tgalcen=True).T\n\n\tif emura is not None and emudec is not None and evrad is not None and edist is not None:\n\t\tcovpmrapmdec = np.zeros((len(emura), 2, 2))\n\t\tcovpmrapmdec[:, 0, 0] = emura\n\t\tcovpmrapmdec[:, 1, 1] = emudec\n\t\tcovpmlpmb = co.cov_pmrapmdec_to_pmllpmbb(covpmrapmdec, ra, dec, degree=True)\n\t\tcovV = co.cov_dvrpmllbb_to_vxyz(dist, edist, evrad, mul, mub, covpmlpmb, l, b, plx=parallax, degree=True)\n\n\t\teVx, eVy, eVz = covV[:, 0, 0], covV[:, 1, 1], covV[:, 2, 2]\n\t\txg = xs - rsun\n\t\ttheta = np.arctan2(ys, xg)\n\t\tct = np.cos(theta)\n\t\tst = np.sin(theta)\n\t\teVR = np.sqrt(eVx * eVx * ct * ct + eVy * eVy * st * st)\n\t\teVT = np.sqrt(eVx * eVx * st * st + eVy * eVy * ct * ct)\n\n\t\treturn Rs, R, Z, vZ, vR, vT, eR, eZ, eVR, eVT, eVz\n\n\treturn Rs, R, Z, vZ, vR, vT", "def cross(v1: Vector, v2: Vector) -> Vector: # Function is fucked TODO\n if len(v1.coords) != 3 or len(v2.coords) != 3:\n raise ValueError(\"Vectors have to be 3 fucking D, nøøb\")\n x = v1.y * v2.z - v1.z * v2.y\n y = v1.z * v2.x - v1.x * v2.z\n z = v1.x * v2.y - v1.y * v2.x\n return Vector(x, y, z)" ]
[ "0.65670574", "0.6445967", "0.6404751", "0.6170087", "0.6155207", "0.6150926", "0.6138606", "0.6094283", "0.6083068", "0.6052513", "0.6026877", "0.5970654", "0.5965773", "0.5965605", "0.59526074", "0.5930183", "0.5930033", "0.59086335", "0.5895722", "0.5877011", "0.5875526", "0.58636105", "0.5845401", "0.58264625", "0.58239585", "0.58193433", "0.58039296", "0.58035254", "0.58021384", "0.57867295", "0.57745266", "0.577216", "0.57654655", "0.57581484", "0.5743896", "0.5739057", "0.5718975", "0.5686668", "0.5680854", "0.5677167", "0.56744516", "0.56721723", "0.56721514", "0.56651866", "0.5660844", "0.5660101", "0.5656413", "0.5652438", "0.56421775", "0.56403065", "0.563668", "0.56190497", "0.56134486", "0.56111294", "0.5610019", "0.56073666", "0.5602704", "0.560191", "0.559577", "0.55956423", "0.55933577", "0.5591688", "0.5585934", "0.5582173", "0.55689216", "0.5559876", "0.5555444", "0.55538493", "0.5550581", "0.5549841", "0.554449", "0.55437124", "0.5538965", "0.5537199", "0.5535178", "0.5526947", "0.5520108", "0.55156446", "0.5496165", "0.549084", "0.54863256", "0.54858655", "0.54856676", "0.5476668", "0.5476668", "0.54684323", "0.54667336", "0.5466635", "0.5463035", "0.5461133", "0.54593134", "0.5457405", "0.545717", "0.5452877", "0.54522", "0.5451882", "0.54508424", "0.5447416", "0.54450774", "0.54450005" ]
0.59364426
15
Add a new hotel to the system
async def add_hotel_endpoint(request):
    hotel_name = request.args["hotel_name"][0]
    hotel_id = model.add_hotel(hotel_name)
    return json({"hotel_id": hotel_id})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_new_arrival(self):\n pass", "def addHotspot( self, hotspot ):\n self._hotspots.append(hotspot)", "async def add_reservation_endpoint(request):\n hotel_id = request.args[\"hotel_id\"][0]\n room_type = request.args[\"room_type\"][0]\n arrival_date = request.args[\"arrival_date\"][0]\n departure_date = request.args[\"departure_date\"][0]\n status = request.args[\"status\"][0]\n reservation_id = model.add_reservation(hotel_id, room_type, arrival_date, departure_date, status)\n if reservation_id == model.OPERATION_ERROR_RETURN_CODE:\n return json({\"success\": False})\n return json({\"success\": True, \"reservation_id\": reservation_id})", "async def add_inventory_endpoint(request):\n hotel_id = request.args[\"hotel_id\"][0]\n room_type = request.args[\"room_type\"][0]\n room_inventory = request.args[\"room_inventory\"][0]\n model.add_inventory(hotel_id, room_type, room_inventory)\n return json({\"success\": True})", "def add_food(self, _food):\n self.food.append(_food)", "def add_room(self, data):\n room_id = data['room_id']\n x, y = literal_eval(data['coordinates'])\n room_data = {'id': data['room_id'],\n 'title': data['title'],\n 'description' : data['description'],\n 'coordinates': literal_eval(data['coordinates']),\n 'elevation': data['elevation'],\n 'terrain': data['terrain'],\n 'exits' : {direction: '?' for direction in data['exits']}\n }\n self.rooms.setdefault(room_id, room_data)", "def add_registry(self) -> None:\n\n # inits functions corresponding to user input and takes in url input\n item_options = {'n': self.inp_item_price, 'y': self.inp_book_prices}\n url = str(input(\"Enter URL to amazon item: \"))\n # validates url input - prevents inputting duplicate and/or blank URLs\n if(url == \"\" or url in self.load_links()[1]):\n print(\"Item not added - URL already exists or is blank\")\n return\n # user-input price(s) -> then -> validates price input \n prices = item_options.get(self.input_item_category())()\n try:\n for price in prices:\n float(price)\n except ValueError:\n print(\"Do not include any letters or symbols other than '.' 
- Item not added!\")\n return\n # writes input as a line of text to text file\n with open(URL_FILE, 'a') as text_file:\n text_file.write(self.format_string(url, prices))\n pass", "def add_station(self, station_id=None, time=None, location=None):", "def insert(self, name, address, city, state, zipcode, hour, phone, rating, image):\r\n pass", "def restaurants_new():\n # If the user isn't logged in, send to the login page\n if helper.handle_login(login_session) is False:\n return redirect('/login')\n\n if request.method == 'POST':\n if len(request.form['name']) > 0:\n new_restaurant = Restaurant(name=request.form['name'],\n address=request.form['address'],\n phone=request.form['phone'],\n web=helper.check_restaurant_URL(request.form['web']),\n description=request.form['description'],\n user_id=login_session['user_id'])\n session.add(new_restaurant)\n session.commit()\n flash(\"New restaurant created - {}\".format(new_restaurant.name))\n tag_line = request.form['tag_line']\n tag_list = tag_line.split(',')\n for tag in tag_list:\n helper.add_tag_if_not_exists(tag, new_restaurant.id)\n return redirect(url_for('restaurants_page'))\n else:\n flash(\"Incorrect Restaurant details - Please include a name!\")\n\n user_info = helper.get_user_if_exists(login_session)\n return render_template('newrestaurant.html', user_info=user_info)", "def add_location(self, **kwargs):\n \n self.options.update(kwargs)\n self.options['action'] = 'locator.location.add'\n return self.call(self.options)", "def newEquipment(recipe):\r\n db = db_helpers.getDbCon()\r\n cursor = db.cursor()\r\n equipmentInsertQuery = \"\"\"INSERT into equipment (equipment_id, equipment_name) \r\n VALUES (%s, %s) ON Duplicate KEY UPDATE equipment_id = equipment_id;\"\"\"\r\n try:\r\n for instr in recipe.instructions:\r\n for equip in instr.equipment:\r\n cursor.execute(equipmentInsertQuery, (equip.equipment_id, equip.equipment_name))\r\n db.commit()\r\n except Exception:\r\n print(\"Error: OOPs something went wrong while adding new equipment to the database\")\r\n finally:\r\n cursor.close()\r\n db.close()", "def create_hotels_list(self):\n\n hotels = load.loader.get_all_hotels()\n\n self.clear_widgets()\n\n for hotel in hotels:\n btn = HotelButton(text=hotel.name)\n self.add_widget(btn)\n btn.bind(on_release=lambda bt: self.select(bt.text))", "def register_restaurant(self, id, location, meals_list):\r\n r = Restaurant(id)\r\n r.set_location(location)\r\n r.set_meals_offered_list(meals_list)\r\n self._restaurants_list.append(r)", "def add_flight(self, flight: Flight):\n self.flights.append(flight)", "def test_create_hotel(self):\n amsterdam = City.objects.get(name=\"Amsterdam\")\n ibis = Hotel.objects.get(name=\"Ibis\")\n\n self.assertEqual(ibis.city, amsterdam)\n self.assertEqual(ibis.code, \"AMS01\")\n self.assertEqual(ibis.name, \"Ibis\")", "def _create_fleet(self):\n # make an alien\n alien = Alien(self)\n self.aliens.add(alien)", "def add_visit():\n\n # checks to see if user is logged in\n\n if session.get('username'):\n username = session['username']\n user = User.query.filter_by(username=username).first()\n\n # finds the friend searched for on the database\n friend = request.args.get(\"friend\")\n friend_user = User.query.filter_by(username=friend).first()\n\n when = request.args.get(\"when\")\n user_rating = Decimal(request.args.get(\"rating\"))\n\n # finds the restaurant's ID, adds the restaurant to the database if not in yet\n restaurant = request.args.get(\"name\")\n yelp_id = request.args.get(\"id\")\n avg_rating = 
request.args.get(\"avg_rating\")\n price_lvl = request.args.get(\"price\")\n review_count = request.args.get(\"rc\")\n categs = request.args.get(\"categs\")\n list_categs = categs.split(\",\")\n\n if not Restaurant.query.filter_by(name=restaurant).all():\n new_restaurant = Restaurant(yelp_id=yelp_id,\n name=restaurant,\n rating=avg_rating,\n price=turn_to_nums(price_lvl),\n review_count=review_count)\n db.session.add(new_restaurant)\n db.session.commit()\n\n rest_id = db.session.query(Restaurant.id).filter_by(yelp_id=yelp_id).first()[0]\n if not Category.query.filter_by(rest_id=rest_id).all():\n if len(list_categs) == 3:\n categ1, categ2, categ3 = list_categs\n elif len(list_categs) == 2:\n categ1, categ2 = list_categs\n categ3 = None\n else:\n categ1 = list_categs\n categ2 = None\n categ3 = None\n new_categs = Category(rest_id=rest_id,\n categ1=categ1,\n categ2=categ2,\n categ3=categ3)\n db.session.add(new_categs)\n db.session.commit()\n\n # Adding to the visits and uservisits tables\n new_visit = Visit(rest_id=rest_id, date=when)\n db.session.add(new_visit)\n db.session.commit()\n new_visit_id = db.session.query(Visit.id).filter_by(rest_id=rest_id,\n date=when).order_by(Visit.date.desc()).first()[0]\n new_visit_exp = UserExp(visit_id=new_visit_id,\n user_id=user.id,\n rating=user_rating)\n f_new_visit_exp = UserExp(visit_id=new_visit_id,\n user_id=friend_user.id)\n db.session.add(new_visit_exp)\n db.session.add(f_new_visit_exp)\n db.session.commit()\n return \" <span class='label label-success'>Saved!</span>\"\n\n # if not logged in, cannot save\n else:\n return \" <a href='/login'><span class='label label-default'>Login to save</span></a>\"", "def add(self, offer):\n other_offer = self.get(offer.get_price(), offer.get_way())\n if other_offer:\n other_offer.add_quote_amount(offer.get_quote_amount())\n other_offer.add_base_amount(offer.get_base_amount())\n return\n self.book[offer.get_way()].append(offer)\n self.book[offer.get_way()] = sorted(self.book[offer.get_way()], key=lambda entry: entry.get_price(),\n reverse=(offer.get_way() == Trade.WAY_BUY))", "def post(self, request, _format=None): # pylint: disable=unused-argument, no-self-use\n # Assign hotel_booking variables\n if '_TRANSACTION_TYPE_' in request.data['slots']:\n transaction_type = request.data['slots']['_TRANSACTION_TYPE_']['candidates'][0]['tokens']\n else:\n transaction_type = None\n\n if '_LOCATION_' in request.data['slots']:\n location = request.data['slots']['_LOCATION_']['candidates'][0]['tokens']\n else:\n location = None\n\n if '_PRICE_' in request.data['slots']:\n price = request.data['slots']['_PRICE_']['candidates'][0]['tokens']\n else:\n price = None\n\n # this loop sets all of the _SLOTS_ to have a `\"resovled\": 1` so they will be kept\n # through each turn of the conversation. 
Currently, each turn the slots are sent\n # with a `\"resolved\": -1`, so they need to be reset each time, however, they are\n # changing to be persistent based on their resolved status in an update coming soon\n for (slot, slot_data) in request.data['slots'].iteritems():\n if 'candidates' in request.data['slots'][slot]:\n for candidate in range(len(slot_data['candidates'])):\n request.data['slots'][slot]['candidates'][candidate]['resolved'] = 1\n if slot != '_DATE_':\n request.data['slots'][slot]['candidates'][candidate]['value'] = \\\n request.data['slots'][slot]['candidates'][candidate]['tokens']\n else:\n request.data['slots'][slot]['resolved'] = 1\n\n #magical API call to check their credit\n available_credit = check_available_credit()\n\n # state transition example\n # if someone does not have enough credit available to pay for the hotel, \n # redirect them to a credit_card_offer state, and return the payload\n if available_credit < price:\n request.data['state'] = 'credit_card_offer'\n return Response(request.data)\n\n if transaction_type == 'express deal':\n if location and price:\n # This is our magical API call to find express deals\n hotel = find_express_deal(location, price)\n if hotel:\n # This is how to add new _SLOTS_ to the business logic json\n hotel_rating = {\n \"candidates\": [\n {\n \"resolved\": 1,\n \"value\": hotel['hotel_rating']\n }\n ],\n \"required_matches\": \"EQ 1\",\n \"type\": \"string\"\n }\n request.data['slots']['_HOTEL_RATING_'] = hotel_rating\n hotel_type = {\n \"candidates\": [\n {\n \"resolved\": 1,\n \"value\": hotel['hotel_type']\n }\n ],\n \"required_matches\": \"EQ 1\",\n \"type\": \"string\"\n }\n request.data['slots']['_HOTEL_TYPE_'] = hotel_type\n\n # return the modified business logic payload\n return Response(request.data)", "def add_feature(request):\n\n r = {}\n if request.POST.get('code','000') == 'ch00seW199Er':\n # pick a random location\n featured_already = Featured.objects.all().values('location')\n locations = Location.objects.exclude(id=1).exclude(id__in=featured_already).exclude(name__iregex=r'[\\w# ]+(wash|washer|dryer|dyer)[\\w# ]*').filter(type=Location.EATERY)\n features = sample(locations, 10)\n i = randint(0,9)\n selected = features[i]\n tomorrow = date.today()+timedelta(1)\n \n f = Featured(location=selected, \n day=tomorrow,\n description=\"50 cents off if you transact here today\",\n amount=0.5,\n expires=datetime(tomorrow.year, tomorrow.month, tomorrow.day, 13,59))\n f.save() \n r['result'] = {'location': selected.name, 'loc_id': selected.id}\n else:\n r['result'] = '-1'\n return JSONHttpResponse(r)", "def add_location(db_path: str, location: Location) -> None:\n query = f'INSERT INTO locations (name, area, climate) VALUES (\"{location.name}\", {location.area}, {location.climate.climate_type})'\n\n conn: Connection = sqlite3.connect(path.join(db_path, 'company_data.db'))\n curr: Cursor = conn.cursor()\n try:\n curr.execute(query)\n except sqlite3.IntegrityError:\n raise ValueError(\"Error, tray already exists in database.\")\n\n conn.commit()\n curr.close()\n conn.close()", "def addBooking(self, booking):\n self.bookings.addBooking(booking.getID())", "def add_new_oneoff():\n ClientID = request.form['ClientID']\n oneoff_name = request.form['oneoff_name']\n charge = int(float(request.form['charge']) * 100)\n period = request.form['period']\n time = int(float(request.form['time']) * 100)\n workdate = request.form['workdate']\n \n OneOff.insert(oneoff_name, ClientID, charge, time, period, workdate)\n\n return 
redirect(url_for('all_jobs_for_client', ClientID=ClientID))", "def create_office(self, data):\n return self.client.post(\n path='/api/v1/offices/', data=json.dumps(data), content_type='application/json')", "def add(self):\n pass", "def add_place(name, country, city, street):\n place = Place(name=name, country=country, city=city, street=street)\n session.add(place)\n session.commit()", "def handle_add(self, controller):\n \n try:\n pizza = controller.customer.pizza ## get a reference to pizza object of the customer\n \n except Exception:\n showinfo(title='Pop-up', message=\"No Pizza Created Yet.\")\n return\n \n else:\n # create an order if not exist, and add pizza to order\n c = controller.customer\n self.onPress(c) ## update requested data\n if not c.my_order:\n c.my_order = Order(c.name, c.address, c.id)\n \n c.AddToOrder()\n controller.show_frame(PageTwo) ## go to my order page", "def _addSite(self,site):\n self.sites.append(site)", "def add_location():\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n \n form = Location_Form()\n\n if form.validate_on_submit():\n try:\n location = Location(\n site_name = form.site_name.data,\n city = form.city.data,\n state = form.state.data\n )\n db.session.add(location)\n db.session.commit()\n except IntegrityError:\n flash(\"This location already exists\", \"danger\")\n return render_template(\"/admin/add_location.html\", form = form)\n \n flash(\"Location Added!\", \"success\")\n return redirect(\"/administrator\")\n else:\n return render_template(\"/admin/add_location.html\", form = form)", "def add_el(cal, y, m, d, w, t, wind):\r\n i = find_el(cal, y, m, d)\r\n if i is None:\r\n day = {'year': y, 'day': d, 'month': m, 'weather': w,\r\n 'temperature': t, 'wind': wind}\r\n cal.append(day)\r\n else:\r\n print '''This day already have an input if you want \\\r\nto change select the editing mode'''\r\n return cal", "def add_room(self, room):\n self.rooms[room.name] = room", "def add(self, product):\n product_id = str(product.id)\n self.wishlist[product_id] = {'price': str(product.price)}\n self.save()", "def add(self, title, description, state):\n return self.app.post('/add', data=dict(title=title,\n description=description,\n state=state),\n follow_redirects=True)", "def add_flight(self, destination, cost):\n\n self.flights.add((destination, cost))", "def add(self, PlugLead):\n\n self.check_conflicts(PlugLead)\n self.plugleads.append(PlugLead)", "def new(self, obj):\n self.__session.add(obj)", "def new(self, obj):\n self.__session.add(obj)", "def new(self, obj):\n self.__session.add(obj)", "def new(self, obj):\n self.__session.add(obj)", "def new(self, obj):\n self.__session.add(obj)", "def new(self, obj):\n self.__session.add(obj)", "def new(self, obj):\n self.__session.add(obj)", "def new(self, obj):\n self.__session.add(obj)", "def add(self, obj):\n self.session.add(obj)", "def add_holiday(request):\n if not request.user.is_superuser:\n messages.error(request, 'Access denied!\\\n Sorry, only site owners have this permission.')\n return redirect(reverse('home'))\n\n if request.method == 'POST':\n holiday_form = HolidayForm(request.POST, request.FILES)\n itinerary_form = ItineraryForm(request.POST, request.FILES)\n if holiday_form.is_valid() and itinerary_form.is_valid():\n holiday = holiday_form.save(commit=False)\n holiday.is_holiday = True\n holiday.save()\n itinerary = 
Itinerary.objects.create(holiday=holiday,\n name=holiday.name)\n itinerary.save()\n holiday.save()\n itinerary_form = itinerary_form.save(commit=False)\n itinerary_form.itinerary = holiday.itinerary\n holiday.save()\n itinerary_form.save()\n messages.success(request, 'Successfully added holiday tour!')\n return redirect(reverse('holiday_detail', args=[holiday.id]))\n else:\n messages.error(request, 'Failed to add holiday tour. Please ensure \\\n the form is valid.')\n else:\n holiday_form = HolidayForm()\n itinerary_form = ItineraryForm()\n\n template = 'products/add_holiday.html'\n context = {\n 'holiday_form': holiday_form,\n 'itinerary_form': itinerary_form,\n }\n\n return render(request, template, context)", "def add_car(sefl):\n make = \"Test_Toyota\"\n body_type = \"Seden\"\n colour = \"Black\"\n seats = 5\n location = \"-37.814, 144.96332\"\n cost_per_hour = 10.5\n\n newCar = Car( make = make,\n body_type = body_type,\n colour = colour,\n seats = seats,\n location = location,\n cost_per_hour = cost_per_hour,\n booked = True\n\n )\n db.session.add(newCar)\n db.session.commit()\n self.assertTrue(self.carExists(make))", "def place_new():\n if g.is_logged == False:\n flash (\"You need to be logged in\")\n return redirect(url_for('index'))\n\n if request.method == 'POST':\n db = get_db()\n db.execute('''insert into places (name, address, city, zipcode) values (?, ?, ?, ?)''', [request.form['name'], request.form['address'], request.form['city'], request.form['zipcode']])\n db.commit()\n\n flash('The restaurant was succesfully added')\n return redirect(url_for('index'))\n else:\n\n return render_template('newplace.html')", "def addExpense(self, expense) :\n self.__passbook.append(expense)", "def add_room(self, room):\n self.rooms.append(room)", "def test_api_can_add_food_to_a_meal(self):\n response = self.client.post(f'/api/v1/meals/{self.breakfast.id}/foods/{self.oatmeal.id}')\n # import code; code.interact(local=dict(globals(), **locals()))\n\n self.assertEqual(response.data['message'], \"Successfully added oatmeal to breakfast\")", "def add(self):\n\n db.session.add(self)\n db.session.commit()", "def add(self):\n\n db.session.add(self)\n db.session.commit()", "def ship_new(name):\n click.echo('Created ship %s' % name)", "def new_ip(self, ip):\n if not ip in self.ip_list:\n self.ip_list.add(ip)\n host = self.hs.id_to_object(ip)\n host.add_tag('sniffer')\n host.save()\n print_success(\"New ip address: {}\".format(ip))", "def create_candidate(self, data, header):\n return self.client.post(\n path='/api/v2/office/1/register/', data=json.dumps(data), content_type='application/json', headers=header)", "def l7pool_add(env, identifier, **args):\n\n mgr = SoftLayer.LoadBalancerManager(env.client)\n uuid, _ = mgr.get_lbaas_uuid_id(identifier)\n\n pool_main = {\n 'name': args.get('name'),\n 'loadBalancingAlgorithm': args.get('method'),\n 'protocol': args.get('protocol')\n }\n\n pool_members = list(args.get('server'))\n\n pool_health = {\n 'interval': args.get('healthinterval'),\n 'timeout': args.get('healthtimeout'),\n 'maxRetries': args.get('healthretry'),\n 'urlPath': args.get('healthpath')\n }\n\n pool_sticky = {\n 'type': args.get('sticky')\n }\n\n try:\n mgr.add_lb_l7_pool(uuid, pool_main, pool_members, pool_health, pool_sticky)\n click.secho(\"Success\", fg='green')\n except SoftLayerAPIError as exception:\n click.secho(f\"ERROR: {exception.faultString}\", fg='red')", "def post():\n\n errors = check_office_keys(request)\n if errors:\n return raise_error(400, \"Invalid {} key\".format(', 
'.join(errors)))\n details = request.get_json()\n category = details['category']\n name = details['name']\n\n if details['name'].isalpha() is False:\n return raise_error(400, \"The name of the office is in wrong format!\")\n if (office_restrictions(category) is False):\n return raise_error(400, \"select from state, local, federal or legislative\")\n if OfficesModel().get_name(name):\n return raise_error(400, \"office with that name already exists!\")\n\n res = OfficesModel().save(category, name)\n return jsonify({\n \"status\": \"201\",\n \"message\": \"office created successfully!\",\n \"office\": res\n }), 201", "def add_appliance(itemcode, description, marketprice, rentalprice):\n\n itembrand = input(\"Enter item brand: \")\n itemvoltage = input(\"Enter item voltage: \")\n newitem = ElectricAppliances \\\n (itemcode, description, marketprice, rentalprice,\n itembrand, itemvoltage)\n\n FULLINVENTORY[itemcode] = newitem.returnasdictionary()\n print(\"New inventory item added\")", "def new(self, obj):\n\n self.__session.add(obj)", "def new(self, obj):\n\n self.__session.add(obj)", "def add_location(cities, states, business):\n\tcities.add(business[CITY])\n\tstates.add(business[STATE])", "def handle_add(self, controller):\n \n controller.customer.CreatePizza()\n controller.show_frame(PageOne)", "def add_door(door): # noqa: E501\n if connexion.request.is_json:\n door = Door.from_dict(connexion.request.get_json()) # noqa: E501\n\n db = PostgresDB()\n error = db.add_new_door(door.name)\n if error:\n return error\n return \"Door inserted successfully\"", "def newlawn(request, location):\n l = Lawn()\n l.location = location\n l.save()\n return TemplateResponse(request, 'lawn.html', {'lawn': l})", "def add(self, el):\n raise Exception('TODO IMPLEMENT ME !')", "def post(self):\n FeatureBusiness.add(request.get_json(), user_id=request.user_id)\n\n return {\"status\": 201}, 201", "def add_league(inp_to_add, type_to_add, con, host, root, password):\r\n with con.cursor() as cur:\r\n if type_to_add == \"url\":\r\n league_soup = BeautifulSoup(requests.get(inp_to_add).text, 'html.parser')\r\n league_site = inp_to_add\r\n elif type_to_add == \"country\":\r\n midterm_url = get_countries_dict()[inp_to_add]\r\n league_soup = BeautifulSoup(requests.get(midterm_url).text, 'html.parser')\r\n league_site = SOCCER_URL + league_soup.find('ul', class_=\"left-tree\").li.a[\"href\"]\r\n else:\r\n league_soup, league_site = get_first_search_result(\r\n SOCCER_URL + \"/search/competitions/?q=\" + inp_to_add)\r\n\r\n if league_soup:\r\n cur.execute(\"SELECT MAX(id) FROM leagues\")\r\n league_id = cur.fetchall()[0][0]\r\n\r\n addition = (league_soup.body.h1.text, league_soup.body.h2.text, league_site)\r\n cur.execute(\"\"\"INSERT INTO leagues (name, country, url) VALUES (%s, %s, %s)\"\"\", addition)\r\n con.commit()\r\n\r\n league_dict = {league_id: {'name': addition[0], 'url': addition[2]}}\r\n add_all_teams_and_players_in_league(league_dict, con, host, root, password)", "def add_fleet(self, index, *args, **kw):\n\n fleetid = self.fleets.append(ListNode(\"{0!s}\".format(kw.get(\"name\", \"Fleet {0:d}\".format(index))), [\n ListNode(\"Nodes\"),\n ListNode(\"Behaviours\", data=kw.get(\n \"behaviours\", self.defaults[2].get_data()))\n ])\n )\n for i in range(kw.get(\"nodes\", 1)):\n self.add_node(fleetid)", "def new_room(request):\n print(\"new room\")\n new_room = None\n while not new_room:\n with transaction.atomic():\n label = haikunator.haikunate()\n if Room.objects.filter(label=label).exists():\n continue\n new_room = 
Room.objects.create(label=label)\n return redirect('chat:chat_room', label=label)", "def add_battery():\n data = request.get_json()\n battery = battery_rent_service.add(**data)\n battery = model_to_dict(battery)\n return jsonify({'response': battery}), 200", "def add_vehicle(self, vehicle_type, longitude, latitude, battery):\n return run_transaction(self.sessionfactory,\n lambda session: add_vehicle_txn(session,\n vehicle_type,\n longitude,\n latitude,\n battery))", "def add_factory():\n\n if request.method == 'POST':\n add_new_factory_schema = AddNewFactory()\n\n errors = add_new_factory_schema.validate(data=request.form)\n\n if errors:\n abort(400, str(errors))\n\n args = add_new_factory_schema.dump(request.form)\n\n factory = Factory(root_uri=os.environ['ROOT_BACKEND_URI'])\n factory.add_factory(\n factory_name=args['factory_name'],\n size=args['size'],\n city=args['city']\n )\n\n return redirect(url_for('show_documentation.show_factories'))\n\n return render_template('pages/inputs/add_factory.html')", "def add_to_database():\n db_conn.execute(\"INSERT INTO Fietsenstalling (Naam, Achternaam, Adress, FietsNr, PIN) VALUES \"\n \"(?, ?, ?, ?, ?);\",(Naam, Achternaam, Adress, FietsNr, PIN))\n\n db_conn.commit()", "def fusion_api_add_or_update_appliance_trap_destination(self, body=None, id=None, api=None, headers=None): # pylint: disable=W0622\n return self.trap.create(body=body, id=id, api=api, headers=headers)", "def add_new_tag():\n\n name = request.form.get('name')\n\n new_tag = Tag(name=name)\n db.session.add(new_tag)\n db.session.commit()\n\n return redirect(f'/tags')", "def new(self, obj):\n if obj:\n self.__session.add(obj)", "def new(self, obj):\n if obj:\n self.__session.add(obj)", "def add_item(self, name, url):\n self.insert(\"\", \"end\", values=(name, url, \"\"))\n # Add the item - backend\n s.updateItem({\"item\": name, \"url\": url, \"status\": \"\", \"pstatus\": \"\"})\n\n self.selection_clear()", "def test_topo_add_herbviore():\n instance = topo.Topography()\n instance.add_animal(animals.Herbivores())\n assert len(instance.herbivore_list) == 1", "def add_vehicle():\n form = VehicleForm()\n if form.validate_on_submit():\n try:\n new_info = movr.add_vehicle(vehicle_type=form.vehicle_type.data,\n longitude=form.longitude.data,\n latitude=form.latitude.data,\n battery=form.battery.data)\n except IntegrityError as e:\n return render_error_page(e, movr)\n vehicle_id = new_info['vehicle_id']\n\n # check to verify that vehicle was added\n new_vehicle = movr.get_vehicle(vehicle_id)\n if new_vehicle is None: # Insert didn't work\n flash((\"Vehicle with id `{}` \"\n \"NOT successfully added. Edit add_vehicle_txn in \"\n \"movr/transactions.py to add the vehicle to the database.\"\n ).format(vehicle_id))\n redirect(url_for('add_vehicle', _external=True))\n else: # Inserted vehicle was found\n flash('Vehicle added! 
\\nid: {}'.format(vehicle_id))\n return redirect(\n url_for('vehicle', vehicle_id=vehicle_id, _external=True))\n\n # form not properly filled out yet\n return render_template('add_vehicle.html',\n title='Add a vehicle',\n form=form)", "def add_food_order(self, chair_num, _dish):\n self.customers[chair_num].add_food(_dish)", "def insert(self, unhealthy_product, name, description, stores, url):\n self.database.query('''INSERT INTO History\n VALUES (NULL,\n NOW(),\n :unhealthy_product,\n :healthy_product,\n :description,\n :stores,\n :url)''',\n unhealthy_product=unhealthy_product,\n healthy_product=name,\n description=description,\n stores=stores,\n url=url)\n print(f'La substitution du produit \"{name}\" a été ajoutée à la table \\\nHistory !', file=open('print_log.txt', 'a'))", "def add(self, product):\n pass", "def add_edition(self, edition): \n self.editions[edition.id] = edition", "def add_route(enode, route, via, shell=None):\n via = ip_address(via)\n\n version = '-4'\n if (via.version == 6) or \\\n (route != 'default' and ip_network(route).version == 6):\n version = '-6'\n\n cmd = 'ip {version} route add {route} via {via}'.format(\n version=version, route=route, via=via\n )\n\n response = enode(cmd, shell=shell)\n assert not response", "def add(self, item, request):\n\n assert (\n isinstance(item, Election)\n or isinstance(item, ElectionCompound)\n or isinstance(item, Vote)\n )\n\n self.session.add(item)\n self.session.flush()\n\n self.update(item, request)\n self.session.flush()", "def add_entry(self, scenario_info):\n scenario_id, status = scenario_info[\"id\"], \"created\"\n sql = self.insert()\n self.cur.execute(\n sql,\n (\n scenario_id,\n status,\n ),\n )", "def add_entry(self, scenario_info):\n scenario_id, status = scenario_info[\"id\"], \"created\"\n sql = self.insert()\n self.cur.execute(\n sql,\n (\n scenario_id,\n status,\n ),\n )", "def create(self, new_feature):\n all_data = self._load()\n\n # Hijack the feature id and make sure it's unique\n new_feature['id'] = str(uuid.uuid4())\n\n all_data['features'].append(new_feature)\n\n with open(self.path, 'w') as dst:\n dst.write(json.dumps(all_data))", "def create_place():\n form = CreatePlacesForm(request.form)\n if form.validate_on_submit():\n # set the collection\n places_db = mongo.db.places\n # insert the new recipe\n places_db.insert_one({\n 'name': request.form['name'],\n 'city': request.form['city'],\n 'added_by': session['username'],\n 'description': request.form['description'],\n 'tags': request.form['tags'],\n 'image': request.form['image'],\n 'views': 0\n })\n return redirect(url_for('home'))\n return render_template('create_restaurant.html', form=form)", "def _handle_add_new_ingredient(self):\n try:\n username = self.food_service.get_user().get_username()\n converted_date = self.food_service.convert_expire_date(self._date.get())\n today = int(time.time())\n self.food_service.add_ingredient(today, self._ingredient.get(),\n converted_date, username)\n\n self._show_ingredient_list()\n self._delete_txt_fields_and_msg()\n except ValueError:\n self._message_var.set(\"Use the correct form of date: dd/mm/yyyy.\")", "def add_booking(user_id, rest_id, number_of_people, booking_datetime, table_id, entrance_datetime=None):\r\n try:\r\n booking = Booking()\r\n booking.restaurant_id = rest_id\r\n booking.user_id = user_id\r\n booking.booking_datetime = booking_datetime\r\n booking.entrance_datetime = entrance_datetime\r\n booking.number_of_people = number_of_people\r\n booking.table_id = table_id\r\n booking.datetime = 
datetime.datetime.now()\r\n db.session.add(booking)\r\n db.session.commit()\r\n return booking.id\r\n except:\r\n db.session.rollback()\r\n return None", "def add_instance(self,name):\n new = self.create_instance(name)\n self.model.append(new)\n return new", "def add(self):\r\n self._svn('add')", "def add_employee(self, employee):\n self.employees.add(employee)", "def newMenuItem(restaurant_id):\n\n if 'access_token' not in flask_session:\n return logInRedirect()\n restaurant = session.query(Restaurant).filter_by(id = restaurant_id).first()\n user_id = getUserId(flask_session['email'],flask_session['google_plus_id'])\n if not restaurant.user_id == user_id:\n flash(\"Only restaurant owners can add new items.\")\n return redirect(url_for(\"publicMenu\",restaurant_id = restaurant_id))\n\n if request.method == \"POST\":\n new_name = request.form['new_name']\n print \"\\nnewMenuItem POST triggered, name is: \", new_name\n newMenuItem = MenuItem( name=new_name,\n restaurant_id=restaurant.id )\n session.add(newMenuItem)\n session.commit()\n flash( \"new item '\" + new_name + \"' created!\")\n print \"POST worked!\"\n return redirect(url_for(\"showMenu\", restaurant_id=restaurant.id))\n\n else:\n return render_template('newMenuItem.html', restaurant = restaurant)", "def addOne(self):\n m = self.request.get('milestone')\n self._createMilestone(m)\n return self.request.response.redirect(self.context.absolute_url())", "def add_entry():\n if not check_admin_logged() :\n abort(403)\n\n title = request.form['title']\n category = request.form['category']\n buydate = request.form['buydate']\n introduction = request.form['introduction']\n\n if not check_items_in_form(title, category, buydate):\n return redirect(url_for('show_entries_admin'))\n\n new_entry = Entries(title, category, buydate, introduction)\n db.session.add(new_entry)\n\n try :\n db.session.commit()\n except IntegrityError as e :\n flash(e.message)\n return redirect(url_for('show_entries_admin'))\n\n flash(u'成功添加新的条目')\n return redirect(url_for('show_entries_admin'))", "def add_new_flower() -> Union[str, Response]:\n if request.method == \"POST\":\n flower_name = request.form[\"flower_name\"]\n quantity = request.form[\"quantity\"]\n price = request.form[\"price\"]\n valid_quantity = validate_int(quantity)\n valid_price = validate_float(price)\n if not valid_quantity or not valid_price:\n flash(\"Invalid entry\", \"danger\")\n return render_template(\"add_new_flower.html\")\n add = AddFlower(flower_name)\n add.add_new_in_stock(valid_quantity, valid_price)\n return redirect(url_for(\"add_flower\", items=STOCK))\n return render_template(\"add_new_flower.html\")" ]
[ "0.63080275", "0.5862244", "0.58347523", "0.5819879", "0.5525398", "0.5483701", "0.54169357", "0.5386112", "0.5373534", "0.52948624", "0.52928376", "0.5286119", "0.5283983", "0.5268902", "0.5244285", "0.52413", "0.5239886", "0.52386993", "0.5225297", "0.5223356", "0.52185214", "0.51923454", "0.51849073", "0.51708066", "0.5161593", "0.51608807", "0.5150096", "0.5136178", "0.5118596", "0.51046026", "0.50889176", "0.5084965", "0.50813055", "0.5074567", "0.50668657", "0.5063727", "0.5063089", "0.5063089", "0.5063089", "0.5063089", "0.5063089", "0.5063089", "0.5063089", "0.5063089", "0.5062624", "0.5060247", "0.5044881", "0.504145", "0.50385725", "0.5038489", "0.50367796", "0.5034152", "0.5034152", "0.50215715", "0.5020684", "0.501981", "0.5018246", "0.501445", "0.5009417", "0.50027287", "0.50027287", "0.49944776", "0.49935412", "0.49934015", "0.49927536", "0.49674612", "0.49668652", "0.49635038", "0.49583694", "0.49581552", "0.49517542", "0.49460638", "0.49394056", "0.49248713", "0.4923437", "0.49135324", "0.49134782", "0.49134782", "0.49118623", "0.49099845", "0.4907093", "0.49010843", "0.48873615", "0.4883314", "0.48737666", "0.48718992", "0.4871071", "0.48683953", "0.48683953", "0.4863793", "0.48633856", "0.48550105", "0.485227", "0.48503542", "0.48483175", "0.4847432", "0.48473173", "0.48424894", "0.48403525", "0.4833426" ]
0.67610687
0
Add inventory to a given hotel
async def add_inventory_endpoint(request): hotel_id = request.args["hotel_id"][0] room_type = request.args["room_type"][0] room_inventory = request.args["room_inventory"][0] model.add_inventory(hotel_id, room_type, room_inventory) return json({"success": True})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_to_inventory(self, newItem):\n\n if len(self.player_inventory) >= 8:\n print(\"\"\"You already have the maximum of 7 items in your inventory,\n looks like you will need to get rid of an item to get {}\"\"\".format(newItem.name))\n\n print(\"Would you like to get rid of an item to add the {} to your inventory?\".format(newItem.name))\n\n if 'yes' in choice:\n dropping = player_inventory.drop()\n print(dedent('Okay, {} was removed from your inventory.'.format(item_name)))\n\n elif 'no' in choice:\n print(dedent('Okay redirecting you back to shop.'))\n return False\n\n else:\n print(dedent('Seems like you did not make a valid choice, aborting ...'))\n return False\n\n else:\n\n if newItem.type == \"food\":\n self.player_inventory[newItem.name] = newItem.health_addition\n elif newItem.type == \"weapon\":\n self.player_inventory[newItem.name] = newItem.quality\n\n print(dedent(\"\"\"\n ##############################################\n Nice, the {} has been added to your inventory!\n \"\"\".format(newItem.name)))", "def add_to_inventory(self, item):\n\t\tif item in self.inventory:\n\t\t\tself.inventory[item] += 1\n\t\telse:\n\t\t\tself.inventory[item] = 1", "def load_inventory(self):\n for item in self.items:\n self.rooms[int(item.initial_room_id) - 1].inventory.add(item)", "def inventory_add(self, item):\n if (len(self.ItemList) >= self.InventorySize):\n # Inventory full\n return 2\n self.ItemList.append(item)\n return 0", "def addToInventory(modList, item):\r\n modList.append(item)", "def inventoryAdd(obj):\n size=1\n if obj==\"TSA Trophy\":\n size =2\n print(\"The TSA Trophy takes two hands to pick up.\")\n if len(inventory)+size>2:\n print(\"Your hands are too full to pick up\",obj+\".\")\n else:\n print(\"You picked up\",obj)\n inventory.append(obj)\n inventoryCall()", "def add_to_inventory(self, item_to_add_to_inventory):\n raise NotImplementedError(\"Subclasses define what adding to the inventory entails\")", "def add_appliance(itemcode, description, marketprice, rentalprice):\n\n itembrand = input(\"Enter item brand: \")\n itemvoltage = input(\"Enter item voltage: \")\n newitem = ElectricAppliances \\\n (itemcode, description, marketprice, rentalprice,\n itembrand, itemvoltage)\n\n FULLINVENTORY[itemcode] = newitem.returnasdictionary()\n print(\"New inventory item added\")", "def add_to_inventory(item, location, quantity, user=None):\n\n try:\n inventory = Inventory.objects.get(item=item, location=location)\n inventory.quantity += quantity\n inventory.save()\n except ObjectDoesNotExist:\n inventory = Inventory.objects.create(item=item, location=location, quantity=quantity)\n\n transaction = InventoryTransaction.objects.create(inventory=inventory, quantity=quantity, user=user)\n\n return transaction", "def add_inventory(cd_instance, lst_Inventory):\r\n \r\n lst_Inventory.append(cd_instance) \r\n return lst_Inventory", "def add_item(self,itm,qty=1):\n inv = self.get_inventory()\n s = str(itm)\n inv[s] = inv.get(s, 0) + qty\n self.put_inventory(inv)", "def addEquipmenttoRecipe(recipe):\r\n db = db_helpers.getDbCon()\r\n cursor = db.cursor()\r\n recipe_instruction_id = instruction_helpers.getRecipeInstructionID(recipe)\r\n recipeEquipmentInsertQuery = \"\"\"INSERT into recipe_equipment (recipe_instruction_id, equipment_id) VALUES (%s, %s)\"\"\"\r\n try:\r\n for ind, instr in enumerate(recipe.instructions):\r\n for equip in instr.equipment:\r\n cursor.execute(recipeEquipmentInsertQuery, (recipe_instruction_id[ind], equip.equipment_id))\r\n db.commit()\r\n except Exception:\r\n 
print('Error: OOPs something went wrong while adding Equipment to a Recipe!')\r\n finally:\r\n cursor.close()\r\n db.close()", "def add_newInventory(id, title, artist, table):\r\n dicRow = {'ID': id, 'Title': title, 'Artist': artist}\r\n table.append(dicRow)", "def put_in(self, item):\n try:\n self.bag_of_holding.append(item)\n print(\"You have added {} to your inventory.\".format(item))\n except:\n print('Error in Inventory method: put_in')", "def _add_to_inv(self, block_):\n if block_ in self._inventory:\n self._inventory[block_] += 1\n else:\n self._inventory[block_] = 1", "def add_ingredient_to_shop_list (self, ingredient) :\n found = False\n qty_available = self.quantity_in_fridge (ingredient)\n for ing in self.shop_list :\n if ing.equals(ingredient) :\n qty_needed = ingredient.quantity - qty_available\n ing.add_quantity (qty_needed)\n found = True\n if found == False :\n ingredient.set_quantity(ingredient.quantity - qty_available)\n self.shop_list.append(ingredient)", "def additemtoinventory(item):\n global ITEM_COUNT\n for i in range(0, 10): # first 10 items are weapons, (this code sux, need a better way of doing this)\n if ITEMTYPES[ITEM_LIST[ZERO_BASE_PLYR_POS]] == ITEMTYPES[i]: \n cur_weapon_strength = WEAPON_STRENGTHS[ITEMS[0]]\n new_weapon_strength = WEAPON_STRENGTHS[ITEMTYPES[i]]\n if new_weapon_strength > cur_weapon_strength:\n change_weapon(ITEMTYPES[i])\n ITEMS[0] = ITEMTYPES[i] # 'overwrite' the main weapon with the new one\n remove_item_from_map()\n return # exit here if item is weapon\n else:\n remove_item_from_map()\n return # remove the inferior weapon from the map and return\n ITEMS.append(ITEMTYPES[item])\n ITEM_COUNT = len(ITEMS)\n remove_item_from_map()", "def add_item_to_inventory(game, *args):\n (item, action_description, already_done_description) = args[0]\n if not game.is_in_inventory(item):\n print_bold(action_description)\n game.add_to_inventory(item)\n print_italic(\"You've just got a {item}.\".format(item=item.name))\n else:\n print_italic(already_done_description)\n return False", "def add_to_inventory(self, item, quantity):\n\t\tincreaseQuantity = None\n\t\taddToDict = True\n\t\tfor key in self.inventoryDictionary:\n\t\t\tif key.name == item.name:\n\t\t\t\taddToDict = False\n\t\t\t\tincreaseQuantity = key\n\t\t\t\tbreak\n\t\t\t\t\n\n\t\t\telse:\n\t\t\t\taddToDict = True\n\t\t\t\t\n\n\t\tif addToDict:\n\t\t\tself.inventoryDictionary[item] = quantity\n\t\telse:\n\t\t\tself.inventoryDictionary[increaseQuantity] += quantity", "def add_food_to_bag(self):\n self.food_eaten.set(sum([species.food.get() for species in self.ecosystem]))", "def fill_ingredient(self, ingredient: str, quantity: int) -> None:\n self.inventory_availability[ingredient] = quantity", "def test_add_item(self):\n self.inv.add_item(self.item_helmet)\n str_inventory = self.inv.pretty\n str_item = self.item_helmet.pretty\n\n self.rebuild_instance()\n str_unequipped = self.inv.unequipped[0].pretty\n\n assert str_inventory == self.inv.pretty\n assert str_item == str_unequipped", "def add_food(self, _food):\n self.food.append(_food)", "def add_item(item):\n # Check first if the item already exists in the inventory\n for i in get_inventory():\n if i['name'] == item['name']:\n print(f\"[ERROR] item with name {i['name']} already exists\")\n break\n else:\n print(f'[INFO] Adding item {item}')\n INVENTORY.append(item)\n # mongo.collection().insert_one(item)", "def inventory(self, inventory):\n\n self._inventory = inventory", "def add_inventory_group(self, key):\n host_dict = {'hosts': [], 'vars': {}}\n 
self.inventory[key] = host_dict\n return", "def getitem(self):\n self.inventory += 1", "def add_new_item():\n #global FULL_INVENTORY\n item_code = get_input(\"Enter item code: \")\n item_desc = get_input(\"Enter item description: \")\n item_rental_price = get_input(\"Enter item rental price: \")\n\n # Get price from the market prices module\n item_price = market_prices.get_latest_price(item_code)\n new_inventory_item = inventory_class.Inventory(item_code, item_desc,\n item_price, item_rental_price)\n is_furniture = get_input(\"Is this item a piece of furniture? (Y/N): \")\n if is_furniture.lower() == \"y\":\n item_material = get_input(\"Enter item material: \")\n item_size = get_input(\"Enter item size (S,M,L,XL): \")\n new_item = furniture_class.Furniture(new_inventory_item, item_material, item_size)\n else:\n is_electrical_appliance = get_input(\"Is this item an electric appliance? (Y/N): \")\n if is_electrical_appliance.lower() == \"y\":\n item_brand = get_input(\"Enter item brand: \")\n item_voltage = get_input(\"Enter item voltage: \")\n new_item = elec_appliances_class.ElecAppliances(new_inventory_item,\n item_brand, item_voltage)\n else:\n new_item = new_inventory_item\n FULL_INVENTORY[item_code] = new_item.return_as_dictionary()\n print(\"New inventory item added\")\n return new_item.return_as_dictionary", "def newEquipment(recipe):\r\n db = db_helpers.getDbCon()\r\n cursor = db.cursor()\r\n equipmentInsertQuery = \"\"\"INSERT into equipment (equipment_id, equipment_name) \r\n VALUES (%s, %s) ON Duplicate KEY UPDATE equipment_id = equipment_id;\"\"\"\r\n try:\r\n for instr in recipe.instructions:\r\n for equip in instr.equipment:\r\n cursor.execute(equipmentInsertQuery, (equip.equipment_id, equip.equipment_name))\r\n db.commit()\r\n except Exception:\r\n print(\"Error: OOPs something went wrong while adding new equipment to the database\")\r\n finally:\r\n cursor.close()\r\n db.close()", "def add_entry(barcode: str, location: str):\n # Log the current date and time and append row to inventory sheet\n row = utils.datetime_array() + [barcode, location]\n sheet.append_row(row)", "def add_inventory(cls,filepath):\n new_inventory = {}\n try:\n productName = input('Please enter the name of product: ')\n inventoryFactory.name = productName # set product name\n new_inventory['name'] = inventoryFactory.name # get product name\n if not new_inventory['name']:\n raise ValueError('Error: Empty Product Name')\n\n productWeight = float(input('Please enter weights of product: '))\n inventoryFactory.weight = productWeight\n new_inventory['weight'] = inventoryFactory.weight\n if not new_inventory['weight']:\n raise ValueError('Error: Empty Product Weight')\n\n productPrice = float(input('Please enter product price: '))\n inventoryFactory.price = productPrice\n new_inventory['price'] = inventoryFactory.price\n if not new_inventory['price']:\n raise ValueError('Error: Empty Product Price')\n\n except ValueError as err:\n print(err)\n print('Product not added.')\n exit()\n\n except KeyboardInterrupt:\n print('\\nHiting the interrupt key.')\n print('Product not added.')\n exit()\n\n print(new_inventory)\n print('1.Add into Rice\\n2.Add into Pulses\\n3.Add into Wheats')\n choice = int(input('Enter where you want to add: '))\n switcher = {\n 1 : lambda: cls.save_inventorybook(filepath, 'Rice', new_inventory),\n 2 : lambda: cls.save_inventorybook(filepath, 'Pulses', new_inventory),\n 3 : lambda: cls.save_inventorybook(filepath, 'Wheats', new_inventory)\n }\n func = switcher.get(choice, lambda: 'Invalid 
choice please select correct options.')\n func()", "def collect(item):\n inventory.append(item)\n print(f'You now have: {inventory}')", "def adauga_inventar(id, nume, descriere, pret, locatie, lista):\n if get_by_id(id, lista) is not None:\n raise ValueError(\"Id-ul exista deja\")\n inventar = creeaza_inventar(id, nume, descriere, pret, locatie)\n if float(get_pret(inventar))\\\n < 0:\n raise ValueError(\"pretul este negativ! Incorect\")\n return lista + [inventar]", "def add_to_inventory(self, item_name: str, quantity: int) -> None:\n raise_if_false(quantity >= 0, f\"Quantity [{quantity}] can't be negative\")\n\n # If the item is already in inventory, just increment the quantity\n if item_name in self._player_data.inventory:\n self._player_data.inventory[item_name] += quantity\n else:\n self._player_data.inventory[item_name] = quantity", "def test_update_inventory(self):\n pass", "def update(self):\n try:\n data = self.api.get_inventory(self.site_id)\n inventory = data[\"Inventory\"]\n except KeyError:\n _LOGGER.error(\"Missing inventory data, skipping update\")\n return\n except (ConnectTimeout, HTTPError):\n _LOGGER.error(\"Could not retrieve data, skipping update\")\n return\n\n self.data = {}\n self.attributes = {}\n\n for key, value in inventory.items():\n self.data[key] = len(value)\n self.attributes[key] = {key: value}\n\n _LOGGER.debug(\"Updated SolarEdge inventory: %s, %s\", self.data, self.attributes)", "def add_bag(self, bag, quantity):\n self.bags.append((bag, quantity))", "def add_to_basket(self, item):\n self._products.append(item)", "def replenish(self, amount: int):\n self._inventory += amount", "def act(self, **kwargs):\n source_entity = kwargs[action.SOURCE_ENTITY]\n if action.EQUIPMENT_SLOT in kwargs:\n equipment_slot = kwargs[action.EQUIPMENT_SLOT]\n else:\n equipment_slot = self.get_equipment_slot(source_entity)\n old_item = None\n if source_entity.equipment.slot_is_equiped(equipment_slot):\n old_item = source_entity.equipment.unequip(equipment_slot)\n self._re_equip(source_entity, equipment_slot)\n if not old_item is None:\n source_entity.inventory.try_add(old_item)\n self.add_energy_spent_to_entity(source_entity)", "def inventory_items(self, inventory_items):\n\n self._inventory_items = inventory_items", "def pickUp(self):\n pos = self.getRoverLocation()\n item = self.map[pos.y,pos.x]\n if type(item) == Part:\n self.inventory.addPart(str(item))\n self.map[pos.y,pos.x] = None", "def add(table):\n\n # your code\n inventory_data = [\"Product: \", \"Manufacturer: \", \"Purchase Year: \", \"Durability: \"]\n inputs = ui.get_inputs(inventory_data, \"Add item\")\n ID = common.generate_random(table)\n table.append([ID, *inputs])\n return table", "def restock_inventory(inventory):\r\n\tnew_dictionary = {} # create a new emply dictionary\r\n\tfor key, value in inventory.items(): # look for key value pairs\r\n\t\tinventory[key] = value + 10 # in the inventory with the key lets say pencil increase the value by 10\r\n\treturn (inventory) # returns the updated dictionary\r", "def addEquipmenttoUser(user_id, recipe):\r\n db = db_helpers.getDbCon()\r\n cursor = db.cursor()\r\n userEquipmentInsertQuery = \"\"\"INSERT into user_equipment (user_id, equipment_id) VALUES (%s, %s)\"\"\"\r\n try:\r\n for instr in recipe.instructions:\r\n for equip in instr.equipment:\r\n cursor.execute(userEquipmentInsertQuery, (user_id, equip.equipment_id))\r\n db.commit()\r\n except Exception:\r\n print('Error: OOPs something went wrong while adding Equipment to a user!')\r\n finally:\r\n 
cursor.close()\r\n db.close()", "async def add_hotel_endpoint(request):\n hotel_name = request.args[\"hotel_name\"][0]\n hotel_id = model.add_hotel(hotel_name)\n return json({\"hotel_id\": hotel_id})", "def add(self, product, qty):\n product_id = str(product.id)\n\n if product_id in self.basket:\n self.basket[product_id]['qty'] = qty\n else:\n self.basket[product_id] = {'price': str(product.price), 'qty': qty}\n\n self.save()", "def equip(self, item, actor):\n if (item.slot not in self.EqSlots.keys()):\n # Not an equipment.\n return 1\n\n old_item = self.EqSlots.get(item.slot)\n\n # Ok, equip and remove from list.\n self.EqSlots[item.slot] = item\n self.inventory_remove(item)\n item.give_bonus(actor)\n\n if (old_item is not None):\n # Was not empty - remove (any) old equipment bonus and add to inventory\n old_item.remove_bonus(actor)\n self.inventory_add(old_item)\n return 0", "def add_food_order(self, chair_num, _dish):\n self.customers[chair_num].add_food(_dish)", "def _add_re_inventories_to_database(invs):\n\n set_ids_to_delete = set(\n [n[0] for n in filter(None, invs)]) # list of just the _set ids to remove them from the database\n\n timestamp = syt.get_timestamp()\n for s in set_ids_to_delete:\n db.run_sql(\"DELETE FROM re_inventories WHERE set_id = ?\", (s,))\n db.run_sql(\"UPDATE sets SET last_inv_updated_re = ? WHERE id = ?\", (timestamp, s))\n db.batch_update(\n 'INSERT OR IGNORE INTO re_inventories(set_id, piece_id, quantity, color_id) VALUES (?,?,?,?)',\n invs)\n\n syt.log_debug(\"Added {} unique pieces to database for {}\".format(len(invs), len(set_ids_to_delete)))", "def addnewitem():\n\n itemcode = input(\"Enter item code: \")\n itemdescription = input(\"Enter item description: \")\n itemrentalprice = input(\"Enter item rental price: \")\n\n # Get price from the market prices module\n itemprice = get_latest_price(itemcode)\n\n isfurniture = input(\"Is this item a piece of furniture? 
(Y/N): \")\n if isfurniture.lower() == \"y\":\n add_furniture(itemcode, itemdescription, itemprice, itemrentalprice)\n else:\n iselectricappliance = input(\"Is this item an electric appliance?\"\n \" (Y/N): \")\n if iselectricappliance.lower() == \"y\":\n add_appliance(itemcode, itemdescription, itemprice, itemrentalprice)\n add_non_furniture_nor_appliance(itemcode, itemdescription, itemprice,\n itemrentalprice)\n print(\"New inventory item added\")", "def with_water(self, water):\n self.ingredients.append(water)\n return self", "def populate_initial_inventory(self):\r\n\r\n weapons_file = open('initial-inventory.json', \"r\")\r\n json_data = json.loads(weapons_file.read())\r\n weapons_file.close()\r\n\r\n weapons = json_data['weapons']\r\n for weapon in weapons:\r\n requests.post(\"http://\" + self.ip_address + \":3000/Weapons\", data=weapon)", "def pickUpItem(self, app, newItem: Stack):\n\n if newItem.isEmpty(): return\n\n # Prioritize existing stacks of the item first\n for (i, slot) in enumerate(self.inventory):\n stack = slot.stack\n if stack.isInfinite() and stack.item == newItem.item:\n # It just stacks into an infinite slot, so no change\n return\n elif newItem.isInfinite() and stack.item == newItem.item:\n # ditto\n return \n elif stack.amount > 0 and stack.item == newItem.item:\n self.inventory[i].stack.amount += newItem.amount\n return\n\n # If that fails, then just add the item to the next open space\n for (i, slot) in enumerate(self.inventory):\n if slot.isEmpty():\n self.inventory[i].stack = newItem\n return\n \n # TODO: Full inventory??\n 1 / 0", "def get_inventory(self, node):", "def test_add_new_electric(self):\n input_vars = ['5', 'Shaver', '1', 'n', 'y', 'Norelco', '110']\n inventory = {}\n with patch('builtins.input', side_effect=input_vars):\n main.add_new_item(inventory)\n self.assertEqual(inventory['5'],\n {\n 'product_code': '5',\n 'description': 'Shaver',\n 'market_price': 24,\n 'rental_price': '1',\n 'brand': 'Norelco',\n 'voltage': '110'\n })", "def do_put(self, arg):\r\n\r\n # put this value in a more suitably named variable\r\n itemToStore = arg.lower()\r\n\r\n # get a list of all \"description words\" for each item in the inventory\r\n invDescWords = getAllDescWords(inventory)\r\n \r\n # Nice little easter egg :) \r\n if itemToStore == 'troll in bag':\r\n print(bcolors.start + \"You cannot put troll in bag, troll is a creature.\" + bcolors.end)\r\n return\r\n\r\n # find out if the player doesn't have that item\r\n if itemToStore not in invDescWords:\r\n print('You want to put \"%s\" in what?!' % (itemToStore))\r\n return\r\n \r\n\r\n # get the item name that the player's command describes\r\n item = getFirstItemMatchingDesc(itemToStore, inventory)\r\n if item != None:\r\n print('You put %s. in the container.' 
% (worldItems[item][SHORTDESC]))\r\n inventory.remove(item) # remove from inventory\r\n worldRooms[location][ITEMINV].append(item) # add to the container\r", "def openinv(cls): #THIS DOESN'T NEED TO BE MODIFIED!\n\n while True:\n inventory_items = {thing.id: thing.name for thing in cls.inventory}\n inventory_items[\"exit\"] = \"Exit Inventory\"\n inventory_items[\"newln\"] = \"\"\n inventory_items[\"playername\"] = str(gray('\"{}\"'.format(cls.name)))\n inventory_items[\"lv\"] = str(gray(\"LV: {}\".format(cls.lv)))\n inventory_items[\"hp\"] = str(gray(\"HP: {}/{}\".format(cls.hp, cls.max_hp)))\n inventory_items[\"exp\"] = str(gray(\"EXP: {}/40\".format(cls.exp)))\n\n choice = Menu.menu(\n title = \"Inventory\",\n contents = inventory_items \n )\n if choice == \"exit\":\n Terminal.clear_all()\n return\n while True:\n displayed_item = next((thing for thing in cls.inventory if thing.id == choice), None)\n final_choice = Menu.menu(\n title = displayed_item.name,\n contents = {\n \"interact\":displayed_item.interact_label,\n \"inspect\":\"Inspect\",\n \"drop\":\"Drop\",\n \"back\":\"Back\"\n }\n )\n if final_choice == \"back\":\n break\n if final_choice == \"interact\":\n use = displayed_item.interact()\n Terminal.clear_all()\n print(use[\"message\"])\n if \"heal_\" in use[\"action\"]:\n cls.hp += int(use[\"action\"].replace(\"heal_\", ''))\n if cls.hp > cls.max_hp:\n cls.hp = cls.max_hp\n cls.inventory.remove(displayed_item)\n Game.standard_wait()\n break\n if final_choice == \"inspect\":\n Terminal.clear_all()\n print(displayed_item)\n Game.standard_wait()\n continue\n if final_choice == \"drop\":\n Terminal.clear_all()\n print(\"You dropped the {}\".format(displayed_item.name))\n cls.inventory.remove(displayed_item)\n Game.standard_wait()\n break", "def addItem(self, itemTypeStr, itemId = None):\n if itemTypeStr not in self.__inventory__:\n self.__inventory__[itemTypeStr] = []\n if itemId == None:\n if itemTypeStr in AgentInventory.__idQueue__ and len(AgentInventory.__idQueue__[itemTypeStr]) > 0:\n itemId = AgentInventory.__idQueue__[itemTypeStr][0]\n self.dequeueItem(itemTypeStr)\n else:\n itemId = \"{}{}\".format(itemTypeStr, self.getId())\n item = Item(itemId, itemTypeStr)\n self.__inventory__[itemTypeStr].append(item)\n return item", "def equip(self, command):\n\n if len(command) > 1:\n if not self.weapon:\n for item in self.inventory:\n if item.name == command[1]:\n if command[1] == 'knife' or command[1] == 'rock' or command[1] == 'stick' or command[1] == 'lamp':\n self.inventory.remove(item)\n self.weapon.append(item)\n print(\"You equipped a \" + item.name)\n return\n else:\n print(\"You can't equip that\")\n else:\n print('You cannot equip two items \\nYou must unequip the ' + self.weapon[0].name + ' first.')\n else:\n print(\"Equip what?\")", "def add_item(product, price):\n ADD_PRODUCTS[product] = price", "def _place(self, loc, exclude=None, block_=None):\n if not self._inventory:\n raise Exception('Inventory empty')\n\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception((\n 'You requested not to place %s, but it is the only '\n 'block in the inventory.' 
% exclude\n ))\n\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n\n self._set_block(loc, block_)", "async def list_inventory_endpoint(request):\n hotel_id = request.args[\"hotel_id\"][0]\n start_date = request.args[\"start_date\"][0]\n end_date = request.args[\"end_date\"][0]\n inventory = model.list_inventory(hotel_id, start_date, end_date)\n if inventory == model.OPERATION_ERROR_RETURN_CODE:\n return json({\"success\": False})\n return json({\"success\": True, \"inventory\": inventory})", "def _insert(self, item):\n if item.room is not None:\n item.room.remove(item)\n\n item.player = self\n self._inventory.append(item)\n\n # if the item is a container, add to inventory its contents\n if item.container:\n for con_item in item.items:\n self._insert(con_item)", "def add_ingredient_to_recipe(cls, new_count, ingredients_dict, recipe_id):\n\n for i in range(1, (new_count+1)):\n item = ingredients_dict[i][0]\n measure = ingredients_dict[i][1]\n prepnotes = ingredients_dict[i][2]\n qty = ingredients_dict[i][3]\n\n new_ingredient = Ingredient(recipe_id=recipe_id, item=item, quantity=qty,\n measure=measure, prep_notes=prepnotes)\n\n db.session.add(new_ingredient)\n db.session.commit()\n print \"You successfully added ingredients!\"", "def add_herbivores(self, animal, animal_list):\n self.island_map[animal_list['loc'][0]][\n animal_list['loc'][1]].population.append(\n Herbivore(age=animal['age'], weight=animal['weight']))", "def add_to_cart(db, itemid, quantity):", "def add_node(self, node):\n\n public_ip = [addr.address for addr in node.ipaddresses if addr.is_public][0]\n dest = public_ip\n\n # Add to index\n self.index[dest] = node.api_id\n\n # Inventory: Group by node ID (always a group of 1)\n self.inventory[node.label] = [dest]\n\n # Inventory: Group by datacenter city\n self.push(self.inventory, self.get_datacenter_city(node), dest)\n\n # Inventory: Group by dipslay group\n self.push(self.inventory, node.display_group, dest)", "def add(self, offer):\n other_offer = self.get(offer.get_price(), offer.get_way())\n if other_offer:\n other_offer.add_quote_amount(offer.get_quote_amount())\n other_offer.add_base_amount(offer.get_base_amount())\n return\n self.book[offer.get_way()].append(offer)\n self.book[offer.get_way()] = sorted(self.book[offer.get_way()], key=lambda entry: entry.get_price(),\n reverse=(offer.get_way() == Trade.WAY_BUY))", "def do_merge(self, cr, uid, ids, context=None): \n invent_obj = self.pool.get('stock.inventory')\n invent_line_obj = self.pool.get('stock.inventory.line')\n invent_lines = {}\n if context is None:\n context = {}\n for inventory in invent_obj.browse(cr, uid, context['active_ids'], context=context):\n if inventory.state == \"done\":\n raise osv.except_osv(_('Warning!'),\n _('Merging is only allowed on draft inventories.'))\n\n for line in inventory.inventory_line_id:\n key = (line.location_id.id, line.product_id.id, line.product_uom.id)\n if key in invent_lines:\n invent_lines[key] += line.product_qty\n else:\n invent_lines[key] = line.product_qty\n\n\n new_invent = invent_obj.create(cr, uid, {\n 'name': 'Merged inventory'\n }, context=context)\n\n for key, quantity in invent_lines.items():\n invent_line_obj.create(cr, uid, {\n 'inventory_id': new_invent,\n 'location_id': key[0],\n 'product_id': key[1],\n 'product_uom': key[2],\n 'product_qty': quantity,\n })\n\n return {'type': 
'ir.actions.act_window_close'}", "def inventory_id(self, inventory_id):\n\n self._inventory_id = inventory_id", "def add(self, product):\n pass", "def save(self):\n\n conn = psycopg2.connect(\"dbname=postgres user=postgres password=postgres\")\n cursor = conn.cursor()\n\n # Insert\n cursor.execute(\n \"INSERT INTO inventory (name, health, power, intellect, dexterity) VALUES (%s, %s, %s, %s, %s) RETURNING id\",\n (self.name, self.health, self.power, self.intellect, self.dexterity),\n )\n\n conn.commit()\n cursor.close()\n conn.close()", "def add_furniture(itemcode, description, marketprice, rentalprice):\n\n material = input(\"Enter item material: \")\n size = input(\"Enter item size (S,M,L,XL): \")\n newitem = Furniture(itemcode, description,\n marketprice, rentalprice\n , material, size)\n FULLINVENTORY[itemcode] = newitem.returnasdictionary()\n print(\"New inventory item added\")", "def act(self, **kwargs):\n source_entity = kwargs[action.SOURCE_ENTITY]\n item = self._get_item_on_floor(source_entity)\n if item is None:\n raise Exception(\"Could not find item on floor.\", source_entity, item)\n pickup_succeded = self.parent.inventory.try_add(item)\n if pickup_succeded:\n item.remove_component_of_type(\"player_auto_pick_up\")\n msg.send_visual_message(messenger.PICK_UP_MESSAGE % {\"item\": item.description.name},\n source_entity.position.value)\n self.parent.actor.newly_spent_energy += gametime.single_turn\n _item_flash_animation(source_entity, item)", "def restock_inventory(inventory):\n for k in inventory.keys():\n inventory[k] = inventory[k] + 10\n return inventory", "def inventory(game):\n\n # Offset for displaying list on-screen\n x, y = 6, 2\n # Currently selected item\n selection = 0\n # Max number of items shown at once\n max_items = 10\n # Number of items scrolled through so far\n scrolled = 0\n # Offset for messages\n x_msg, y_msg = 2, max_items + 4\n\n game.window.clear()\n while True:\n # Draw selection cursor\n game.window.addstr(y + selection - scrolled, x - 4, CURSOR)\n\n # Get items between current scroll amount and max_items\n items = list(enumerate(game.player.items))[scrolled:scrolled+max_items]\n\n # Print each item in inventory\n for i, item in items:\n # If more than 1, put the quantity\n if item.quantity > 1:\n formatted = '{}: {} x {}\\n'.format(i, item.quantity, item.name)\n else:\n formatted = '{}: {}\\n'.format(i, item.name)\n\n game.window.addstr(i + y - scrolled, x, formatted)\n\n # If equipped, put a little star next to the item\n if item in game.player.equipment.values():\n game.window.addstr(i + y - scrolled, x - 2, '*')\n\n key = game.window.getkey()\n\n if key == 'k' or key == 'KEY_UP':\n if selection > 0:\n selection -= 1\n\n # If the user tries to go above the screen, scroll up by one\n if selection < scrolled:\n scrolled -= 1\n\n game.window.clear()\n\n if key == 'j' or key == 'KEY_DOWN':\n if selection < len(game.player.items) - 1:\n selection += 1\n\n # If the user tries to go below the screen, scroll down by one\n if selection > scrolled + max_items - 1:\n scrolled += 1\n\n game.window.clear()\n\n if key == 'e':\n # Equip the selected item\n if game.player.items[selection].equippable:\n game.player.equip(game.player.items[selection])\n game.window.clear()\n else:\n game.window.addstr(y_msg, x_msg, \"Cannot equip non-equippable item\")\n\n if key == 'c':\n # Eat the selected item\n if game.player.items[selection].kind == 'food':\n heal = game.player.items[selection].stats['hp']\n game.player.eat(game.player.items[selection])\n\n # Put selection cursor 
back to an item\n selection -= 1\n game.window.clear()\n\n game.window.addstr(y_msg, x_msg, \"Healed for {} hp\".format(heal))\n else:\n game.window.addstr(y_msg, x_msg, \"Cannot eat non-food item\")\n\n if key == 'l':\n # Print the item name and description\n item = game.player.items[int(selection)]\n game.window.addstr(y_msg, x_msg, '{}\\n\\n{}'.format(item.name, item.desc))\n\n if key == 'q':\n break\n\n if key == '?':\n help_inventory(game)\n continue", "def save_inventory(self, data, batch):\n logger.info('AddStockInventory inventory save initiated')\n with Transaction().start(DBNAME, 1) as transaction:\n transaction.context = config.get_config().context\n batch = batch\n location = self.Location.search(['name', '=', 'MyInventory'])[-1]\n inventory = self.Inventory()\n inventory.location = location\n inventory.batch_number = batch\n inventory.save()\n for i in data:\n product = self.Product.search([('code', '=', i['code']),\n ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]\n units = self.Uom.search(['name', '=', i['units']])[-1]\n supplier = self.Party.search(['name', '=', i['supplier']])[-1]\n inventory_line = self.InventoryLine()\n inventory_line.product = product\n inventory_line.quantity = float(i['quantity'])\n inventory_line.uom = units\n inventory_line.supplier = supplier\n inventory_line.expiry_date = i['expiry_date']\n inventory_line.inventory = inventory\n inventory_line.save()\n # transaction.cursor.commit()\n inventory.state = 'done'\n inventory.save()\n transaction.cursor.commit()\n return True", "def equip(self, item_name: str, quantity: int) -> None:\n raise_if_false(quantity >= 0, f\"Quantity [{quantity}] can't be negative\")\n raise_if_false(item_name in self._player_data.inventory, f\"Can't equip [{item_name}], not in inventory\")\n\n quantity_to_equip = min(quantity, self._player_data.inventory[item_name])\n if item_name in self._player_data.equipped_items:\n self._player_data.equipped_items[item_name] += quantity_to_equip\n else:\n self._player_data.equipped_items[item_name] = quantity_to_equip\n\n self.remove_from_inventory(item_name, quantity_to_equip)", "def add_to_bag(self, item):\n self._bag.append(item)", "def test_add_new_furniture(self):\n input_vars = ['4', 'Rug', '1', 'y', 'Berber', 's']\n inventory = {}\n with patch('builtins.input', side_effect=input_vars):\n main.add_new_item(inventory)\n self.assertEqual(inventory['4'],\n {\n 'product_code': '4',\n 'description': 'Rug',\n 'market_price': 24,\n 'rental_price': '1',\n 'material': 'Berber',\n 'size': 's'\n })", "def receive_item(self, item):\n self.inventory.append(item)\n events.trigger_event(\"print_message\", \"Picked up {0}\".format(item))", "def add_weapon(self, weapon):\n if self._weapon_i == -1:\n self._weapon_i = 0\n self.weapons.append(weapon)", "def addItem(*args):", "def addItem(*args):", "def addItem(*args):", "def build_inventory(self):\n self.inventory = {\n 'all': {\n 'hosts': [],\n 'vars': self.group_variables\n },\n '_meta': {'hostvars': {}}\n }\n\n # add all droplets by id and name\n for droplet in self.data['droplets']:\n for net in droplet['networks']['v4']:\n if net['type'] == 'public':\n dest = net['ip_address']\n else:\n continue\n\n self.inventory['all']['hosts'].append(dest)\n\n self.add_host(droplet['id'], dest)\n\n self.add_host(droplet['name'], dest)\n\n # groups that are always present\n for group in ('digital_ocean',\n 'region_' + droplet['region']['slug'],\n 'image_' + str(droplet['image']['id']),\n 'size_' + droplet['size']['slug'],\n 'distro_' + 
DigitalOceanInventory.to_safe(droplet['image']['distribution']),\n 'status_' + droplet['status']):\n self.add_host(group, dest)\n\n # groups that are not always present\n for group in (droplet['image']['slug'],\n droplet['image']['name']):\n if group:\n image = 'image_' + DigitalOceanInventory.to_safe(group)\n self.add_host(image, dest)\n\n if droplet['tags']:\n for tag in droplet['tags']:\n self.add_host(tag, dest)\n\n # hostvars\n info = self.do_namespace(droplet)\n self.inventory['_meta']['hostvars'][dest] = info", "def add_product(self, name, cost, stock, location):\n\n cur.execute(\"\"\"INSERT INTO catalogue(vendorname, productname, unitcost, stock, location) \n VALUES (?, ?, ?, ?, ?)\"\"\", (self.vendorname, name, cost, stock, location))", "def add_registry(self) -> None:\n\n # inits functions corresponding to user input and takes in url input\n item_options = {'n': self.inp_item_price, 'y': self.inp_book_prices}\n url = str(input(\"Enter URL to amazon item: \"))\n # validates url input - prevents inputting duplicate and/or blank URLs\n if(url == \"\" or url in self.load_links()[1]):\n print(\"Item not added - URL already exists or is blank\")\n return\n # user-input price(s) -> then -> validates price input \n prices = item_options.get(self.input_item_category())()\n try:\n for price in prices:\n float(price)\n except ValueError:\n print(\"Do not include any letters or symbols other than '.' - Item not added!\")\n return\n # writes input as a line of text to text file\n with open(URL_FILE, 'a') as text_file:\n text_file.write(self.format_string(url, prices))\n pass", "def _buy_and_add_ships(player, ships_list, game_data):\n\n # initialisation of the money of the player\n wallet = 100\n\n # separate all the ship bought\n for ship in ships_list.split(' '):\n # separate the name and the type of the ship\n if ship:\n name, ship_type = ship.split(':')\n # substract the price of the ship\n wallet -= game_data['ship_characteristics'][ship_type]['cost']\n if wallet >= 0:\n _add_ship(player, name, ship_type, game_data)", "def post(self, request, _format=None): # pylint: disable=unused-argument, no-self-use\n # Assign hotel_booking variables\n if '_TRANSACTION_TYPE_' in request.data['slots']:\n transaction_type = request.data['slots']['_TRANSACTION_TYPE_']['candidates'][0]['tokens']\n else:\n transaction_type = None\n\n if '_LOCATION_' in request.data['slots']:\n location = request.data['slots']['_LOCATION_']['candidates'][0]['tokens']\n else:\n location = None\n\n if '_PRICE_' in request.data['slots']:\n price = request.data['slots']['_PRICE_']['candidates'][0]['tokens']\n else:\n price = None\n\n # this loop sets all of the _SLOTS_ to have a `\"resovled\": 1` so they will be kept\n # through each turn of the conversation. 
Currently, each turn the slots are sent\n # with a `\"resolved\": -1`, so they need to be reset each time, however, they are\n # changing to be persistent based on their resolved status in an update coming soon\n for (slot, slot_data) in request.data['slots'].iteritems():\n if 'candidates' in request.data['slots'][slot]:\n for candidate in range(len(slot_data['candidates'])):\n request.data['slots'][slot]['candidates'][candidate]['resolved'] = 1\n if slot != '_DATE_':\n request.data['slots'][slot]['candidates'][candidate]['value'] = \\\n request.data['slots'][slot]['candidates'][candidate]['tokens']\n else:\n request.data['slots'][slot]['resolved'] = 1\n\n #magical API call to check their credit\n available_credit = check_available_credit()\n\n # state transition example\n # if someone does not have enough credit available to pay for the hotel, \n # redirect them to a credit_card_offer state, and return the payload\n if available_credit < price:\n request.data['state'] = 'credit_card_offer'\n return Response(request.data)\n\n if transaction_type == 'express deal':\n if location and price:\n # This is our magical API call to find express deals\n hotel = find_express_deal(location, price)\n if hotel:\n # This is how to add new _SLOTS_ to the business logic json\n hotel_rating = {\n \"candidates\": [\n {\n \"resolved\": 1,\n \"value\": hotel['hotel_rating']\n }\n ],\n \"required_matches\": \"EQ 1\",\n \"type\": \"string\"\n }\n request.data['slots']['_HOTEL_RATING_'] = hotel_rating\n hotel_type = {\n \"candidates\": [\n {\n \"resolved\": 1,\n \"value\": hotel['hotel_type']\n }\n ],\n \"required_matches\": \"EQ 1\",\n \"type\": \"string\"\n }\n request.data['slots']['_HOTEL_TYPE_'] = hotel_type\n\n # return the modified business logic payload\n return Response(request.data)", "def do_store(self, arg):\r\n\r\n # put this value in a more suitably named variable\r\n itemToStore = arg.lower()\r\n\r\n # get a list of all \"description words\" for each item in the inventory\r\n invDescWords = getAllDescWords(inventory)\r\n \r\n # Nice little easter egg :) \r\n if itemToStore == 'troll in bag':\r\n print(bcolors.start + \"You cannot put troll in bag, troll is a creature.\" + bcolors.end)\r\n return\r\n\r\n # find out if the player doesn't have that item\r\n if itemToStore not in invDescWords:\r\n print('%s does not exist in your inventory, the ground, africa or your pockets, what a shame.' % (itemToStore))\r\n return\r\n \r\n\r\n # get the item name that the player's command describes\r\n \r\n try:\r\n item = getFirstItemMatchingDesc(itemToStore, inventory)\r\n \r\n # broken currently, needs some work doing to check if the STORAGE value exists in the current room then store the item.\r\n if item != None:\r\n print('You store %s in a safe place.' % (worldItems[item][SHORTDESC]))\r\n inventory.remove(item)\r\n worldRooms[location][STORAGE].append(item)\r\n except KeyError:\r\n return(\"Don't even think about it buster brown.\")\r\n \r\n #item = getFirstItemMatchingDesc(itemToStore, inventory)\r\n #if item != None:\r\n # print('You store %s in a safe place.' 
% (worldItems[item][SHORTDESC]))\r\n # inventory.remove(item) # remove from inventory\r\n # worldRooms[location][STORAGE].append(item) # add to the container\r", "def add_edition(self, edition): \n self.editions[edition.id] = edition", "def add_guest(self, src: int, weight: float):\r\n if not self.has_guest(src):\r\n self.guests[src] = weight", "def equip(self):\n item_name = input(\"What item do you want to equip?\\n>\")\n if item_name in self.backpack:\n item = self.backpack[item_name]\n else:\n return \"You don't have this\"\n if item.type in self.equipped:\n self.equipped[item.type] = item\n if item.type == \"Weapon\":\n self.strength = item.strength\n return f\"You have equipped {item.name} on {item.type} item slot\"\n else:\n return \"You can not equip this\"", "def spill(self, agent):\n self.spill_list.append(agent)", "def add_to_inv(self, item):\n for obj in self.inv:\n if obj.name == item.name:\n self.inv[obj] += 1\n break\n else:\n self.inv[item] = 1", "def store_inventory(self, batch, location, quantity, inventory_stock):\n # no transaction needed\n logger.info('ReleaseDiscard store inventory initiated')\n to_inventory = self.Inventory.search([('location', '=', location.id), ('batch_number', '=', batch)])\n if to_inventory:\n return self.update_store(to_inventory[0], quantity, inventory_stock)\n inventory = self.Inventory()\n inventory.location = location\n inventory.batch_number = batch\n inventory.save()\n inventory_line = self.InventoryLine()\n inventory_line.product = inventory_stock.product\n inventory_line.quantity = float(quantity)\n inventory_line.uom = inventory_stock.uom\n inventory_line.supplier = inventory_stock.supplier\n inventory_line.expiry_date = inventory_stock.expiry_date\n inventory_line.inventory = inventory\n inventory_line.save()\n # transaction.cursor.commit()\n inventory.state = 'done'\n inventory.save()\n return True", "def add(self, cheese, index):\n self._stools[index].append(cheese)", "def add_item(self, item):\n self.items_with_price.update(item)" ]
[ "0.6268694", "0.6268613", "0.6260081", "0.6259757", "0.6220235", "0.62095225", "0.6174858", "0.6051948", "0.6041582", "0.6013446", "0.5951593", "0.59234303", "0.5922092", "0.5921586", "0.588076", "0.5787859", "0.5782276", "0.576559", "0.5759909", "0.5720281", "0.56820464", "0.56791717", "0.5660806", "0.56493336", "0.5630271", "0.56248707", "0.5614866", "0.56053007", "0.5572203", "0.55680066", "0.55533135", "0.55470806", "0.5546633", "0.54963243", "0.5440464", "0.54183614", "0.53834134", "0.5379165", "0.5359185", "0.53114164", "0.5303501", "0.5284207", "0.5278816", "0.5270841", "0.52706414", "0.52684975", "0.5263733", "0.5258396", "0.5242784", "0.52355933", "0.5235521", "0.52290887", "0.52266955", "0.5224494", "0.52133113", "0.51928484", "0.51913136", "0.51876026", "0.51816267", "0.5169223", "0.5146844", "0.51397353", "0.51376015", "0.5136391", "0.5128794", "0.5121584", "0.5099489", "0.50957525", "0.50847083", "0.50732875", "0.5057612", "0.5051857", "0.5047125", "0.5040762", "0.5040266", "0.50264716", "0.50246805", "0.5023805", "0.5017796", "0.5014076", "0.50105965", "0.50102663", "0.5009012", "0.50035465", "0.50035465", "0.50035465", "0.50032216", "0.49889675", "0.49853998", "0.4981699", "0.49799266", "0.49720973", "0.4971877", "0.49716908", "0.49608004", "0.4944624", "0.49285403", "0.49272907", "0.4925164", "0.49240753" ]
0.6929253
0
Cancel an existing reservation
async def cancel_reservation_endpoint(request): reservation_id = request.args["reservation_id"][0] model.cancel_reservation(reservation_id) return json({"success": True})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cancel_reservation(payload, clothes_id):\n selection = Reserve.query.filter_by(clothes_id=clothes_id).all()\n # if the given clothes has not been reserved, abort 404\n if len(selection) == 0:\n abort(404)\n # if two or more user reserved the same clothe, abort umprocessable\n if len(selection) >= 2:\n abort(422)\n # check if access user_id matches reservation user_id\n reservation = selection[0]\n # querying who is accessing and check role\n access_user = User.query.filter_by(auth0_id=payload['sub']).first()\n role = access_user.role\n # if user role is \"user\", check if access user_id matches\n # reservation user_id\n reservation_user = reservation.user\n if role == 'user' and access_user.id != reservation_user.id:\n raise AuthError({\n 'code': 'Invalid_claims',\n 'description': 'Unauthorized access by user'\n }, 401)\n\n # query clothes\n clothes = reservation.clothes\n\n # set error status\n error = False\n # cancel that reservation\n try:\n clothes.status = \"\"\n reservation.delete()\n formatted_clothes = clothes.format()\n formatted_user = reservation_user.format()\n except Exception:\n reservation.rollback()\n error = True\n print(sys.exc_info())\n finally:\n reservation.close_session()\n clothes.close_session()\n\n if error:\n abort(422)\n else:\n return jsonify({\n 'success': True,\n 'clothes': formatted_clothes,\n 'user': formatted_user\n })", "def cancel_room():\n try:\n user = User.get_user()\n except ValueError as err:\n return jsonify({\"error\": str(err)})\n\n booking_id = request.form.get(\"booking_id\")\n if not booking_id:\n return jsonify({\"error\": \"No booking id sent to server!\"})\n if \",\" in booking_id:\n return jsonify({\"error\": \"Only one booking may be cancelled at a time.\"})\n\n booking = StudySpacesBooking.query.filter_by(booking_id=booking_id).first()\n if booking:\n if (booking.user is not None) and (booking.user != user.id):\n return jsonify({\"error\": \"Unauthorized: This reservation was booked by someone else.\"}), 400\n if booking.is_cancelled:\n return jsonify({\"error\": \"This reservation has already been cancelled.\"}), 400\n\n if booking_id.isdigit():\n sessionid = request.form.get(\"sessionid\")\n if not sessionid:\n return jsonify({\"error\": \"No session id sent to server.\"}), 400\n try:\n wharton.delete_booking(sessionid, booking_id)\n save_wharton_sessionid()\n if booking:\n booking.is_cancelled = True\n sqldb.session.commit()\n else:\n save_booking(\n lid=1,\n email=user.email,\n booking_id=booking_id,\n is_cancelled=True,\n user=user.id\n )\n return jsonify({'result': [{\"booking_id\": booking_id, \"cancelled\": True}]})\n except APIError as e:\n return jsonify({\"error\": str(e)}), 400\n else:\n resp = studyspaces.cancel_room(booking_id)\n if \"error\" not in resp:\n if booking:\n booking.is_cancelled = True\n sqldb.session.commit()\n else:\n save_booking(\n email=user.email,\n booking_id=booking_id,\n is_cancelled=True,\n user=user.id\n )\n return jsonify({'result': resp})", "def cancel():", "def cancel(self):\r\n self.require_item()\r\n\r\n url = '{0}/cancel'.format(self.get_url())\r\n request = http.Request('PUT', url)\r\n request.use_xml = False\r\n\r\n return request, parsers.parse_empty", "def cancel(self):", "def cancel(self):", "def cancel(self):", "def cancel(self):\n self.session.rollback()", "def cancel_a_parcel(id):\n query = \"\"\"UPDATE parcels SET status = %s WHERE id = %s\"\"\"\n tuple =('canceled' , id)\n db.insert(query, tuple)", "def cancelbooking():\n # check of user is loggedin\n if 'loggedin' in 
session:\n if request.method == 'POST':\n bookingid = request.form['bookingid']\n\n response = requests.delete(\n \"http://localhost:8080/api/bookings/\"+str(bookingid))\n acc = json.loads(response.text)\n return redirect(url_for('site.bookings'))", "def cancel(self):\n self.__canceled = True", "def cancel(self):\n pass", "def test_cancelBooking(self):\n user_id = \"12\"\n car_id = \"6\"\n begin_date = \"2020-05-21\" \n begin_time = \"12:00:00\"\n\n begin_datetime = \"{} {}\".format(begin_date, begin_time) \n\n booking = db.session.query(Booking).filter( Booking.user_id == user_id,\n Booking.car_id == car_id,\n Booking.begin_time == begin_datetime).first()\n \n # Delete row from the database\n db.session.delete(booking)\n\n # Update car's availability \n car = Car.query.get(car_id)\n car.booked = False\n\n # Commit changes\n db.session.commit()\n self.assertFalse(self.bookingExists(user_id, car_id))", "def hook_cancel_assistance(self, data):\n request_id = data[\"request_id\"]\n assignee_chat_id = data[\"volunteer\"]\n log.info(\"CANCEL req:%s\", request_id)\n self.send_message(assignee_chat_id, c.MSG_REQUEST_CANCELED)\n\n self.updater.dispatcher.user_data[assignee_chat_id].update(\n {\"current_request\": None, \"reviewed_request\": None, \"state\": c.State.AVAILABLE}\n )\n del self.updater.dispatcher.bot_data[request_id]\n self.updater.dispatcher.update_persistence()", "def action_cancel(self):\n self.state = 'canceled'", "def do_cancel(self):\r\n self.write({'cancelled': True})", "def cancel(self):\n self.is_active = False\n self.save()", "def cancel(self):\n self.is_active = False\n self.save()", "def cancel_ride(self, cancel_reason: str, ride: dict) -> None:\n card: RideCard = self.ride_card_panel.surface_ride_card(ride)\n card.open_kebab_menu()\n card.kebab_menu.cancel_ride_button.click()\n\n self.cancellation_modal.cancel_ride(cancel_reason)", "def cancel_proposal(self, id: bytes, proposer: 'Address', current_block_height: int) -> None:\n if not self._check_registered_proposal(id):\n revert(\"No registered proposal\")\n\n proposal_info = ProposalInfo.from_bytes(self._proposal_list[id])\n\n if proposal_info.end_block_height < current_block_height:\n revert(\"This proposal has already expired\")\n\n if proposer != proposal_info.proposer:\n revert(\"No permission - only for proposer\")\n\n if proposal_info.status != NetworkProposalStatus.VOTING:\n revert(\"Can not be canceled - only voting proposal\")\n\n proposal_info.status = NetworkProposalStatus.CANCELED\n self._proposal_list[id] = proposal_info.to_bytes()", "def reservation_delete(token_user, res_id):\n res = Reservation.query.get(res_id)\n if res is None:\n abort(404, 'reservation not found')\n\n if not token_user.has_permission('reservation.delete.elevated'):\n is_my_reservation = any(map(lambda m: m.id == token_user.id,\n res.team.members))\n if not (is_my_reservation and\n token_user.has_permission('reservation.delete')):\n abort(403, 'insufficient permissions to delete reservation')\n\n get_db().delete(res)\n get_db().commit()\n\n return '', 204", "def _cancel(self):\n client = SBusClient(self.storlet_pipe_path)\n try:\n resp = client.cancel(self.task_id)\n if not resp.status:\n raise StorletRuntimeException('Failed to cancel task')\n except SBusClientException:\n raise StorletRuntimeException('Failed to cancel task')", "def cancel(self, membership, callback=None):", "def on_cancel(self):\n self.state = CANCELED\n self._reject()", "def cancel(self, cr, uid, ids, notes='', context=None):\n notes = \"\"\n u = self.browse(cr, 
uid, ids)[0].user_id.name\n notes = notes +'\\n'+'vehicle Cancelled at : '+time.strftime('%Y-%m-%d') + ' by '+ u \n self.write(cr, uid, ids, {'state':'cancel','notes':notes})\n return True", "def cancel(self):\n self.stop()\n self.make_callback('canceled')", "def canceled(self):\n self.reject()", "def cancel(self):\n self.cancelled = True", "def cancel(self):\n self.cancelled = True", "def landlord_button_cancel_tenancy(self):\n for record in self:\n self.write(\n {'state': 'cancelled', 'tenancy_cancelled': True})\n rent_ids = self.env['tenancy.rent.schedule'].search(\n [('tenancy_id', '=', record.id),\n ('paid', '=', False),\n ('move_check', '=', False)])\n for value in rent_ids:\n value.write({'is_readonly': True})\n return True", "def cancel(self, uuid):\n return self.__call__('market', 'tradecancel',\n {'orderId': uuid})", "def cancel(self):\n return self.RES_OK", "def cancel(self, update, context):\n\n output = \"\"\n user = self.User(update)\n message = update.message.text.lower().split(\" \")\n print(message[1], message[2])\n if user.id not in self.__users.keys():\n output = \"looks like you don't have any requests at all.\"\n elif message[1].isnumeric() and message[2].isnumeric():\n user = self.__users[user.id]\n line_num = int(message[1])\n station_num = int(message[2])\n found_match = False\n for station in user.stations:\n if station.line_number == line_num and station.station_number == station_num:\n user.remove_station(station)\n self.bus_controller.remove_person_from_the_station(station)\n output = \"Canceled the request\"\n found_match = True\n break\n if not found_match:\n output = \"this doesn't match with any of your active requests, so you can't cancel it.\\n\" \\\n \"make sure that you don't have any typing mistakes\"\n else:\n output = \"the values you entered seem wrong, the values must be number.\"\n self.data_base.log(user, update.message.text, output)\n user.send_message(output)", "def cancel_appointment(request, id):\n appointment = get_object_or_404(Appointment, pk=id)\n\n if request.POST:\n appointment.delete()\n messages.add_message(request, messages.SUCCESS, 'The appointment has been canceled successfully.')\n return redirect('view_appointments')\n\n return render(request, 'cancel_appointment.html', {'appointment': appointment})", "def cancel(self):\n self.succeeded = False\n self.reject()", "def cancel(self):\n self.succeeded = False\n self.reject()", "def cancel(self):\n self.succeeded = False\n self.reject()", "def cancel(self):\n self.succeeded = False\n self.reject()", "def cancel(self):\r\n with sqlite3.connect('system.db') as conn:\r\n cursor = conn.cursor()\r\n if self.rec_id is not None:\r\n cursor.execute(\"DELETE FROM tblBookings WHERE recID=\" + str(self.rec_id))\r\n conn.commit()\r\n\r\n sql_num = \"SELECT numBookings FROM tblUsers WHERE userID=?\"\r\n user = (self.user_id,)\r\n cursor.execute(sql_num, user)\r\n sql_user = \"UPDATE tblUsers SET numBookings=\" + str(cursor.fetchone()[0] - 1) + \" WHERE userID=?\"\r\n cursor.execute(sql_user, user)\r\n conn.commit()\r\n\r\n room = (self.room_id,)\r\n cursor.execute(\"SELECT numOfBook FROM tblRooms WHERE roomID=?\", room)\r\n sql_room = \"UPDATE tblRooms SET numOfBook=\" + str(cursor.fetchone()[0] - 1) + \" WHERE roomID=?\"\r\n cursor.execute(sql_room, room)\r\n conn.commit()", "def cancel(request, slug, template='contacts/person/cancel.html'):\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/login/?next=%s' % request.path)\n\n user = request.user\n if not 
user.has_perm('cancel_person'):\n return HttpResponseForbidden()\n\n try:\n person = Person.objects.get(slug__iexact=slug)\n except Person.DoesNotExist:\n raise Http404\n\n if request.method == 'POST':\n new_data = request.POST.copy()\n if new_data['cancel_person'] == 'Yes':\n if person.status == 'cancelled':\n person.status = 'pendent'\n person.status = calculaStatus(person)\n else:\n person.status = 'cancelled'\n person.user_modify = user\n person.save()\n\n return HttpResponseRedirect(person.get_absolute_url())\n\n\n kwvars = {\n 'object': person,\n }\n\n return render_to_response(template, kwvars, RequestContext(request))", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n 
return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, 
global_params=global_params)", "async def test_cancel_booking_request(client):\n headers = { \n 'Authorization': 'Bearer special-key',\n }\n response = await client.request(\n method='DELETE',\n path='/vms/api/v1/bookingRequests/{booking_request_id}'.format(booking_request_id='booking_request_id_example'),\n headers=headers,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def cancel_game(self, req):\n game = models.BattleShip.getByUrlKey(req.url_key)\n game.cancelled = True\n game.put()\n return msgs.StringMessage(msg=\"Game has been cancelled.\")", "def _cancel(self, __button):\r\n\r\n self.destroy()", "def cancel(self): #$NON-NLS-1$\r", "def cancel(self):\n raise NotImplementedError(\n u\"%s: Method not implemented\", self.__class__.__name__)", "def do_cancel(order):\r\n self.gox.cancel(order.oid)", "def cancelRequest(self, json):\n uID = json.get('uID')\n print(RequestsDAO().getRequestByuID(uID))\n if not RequestsDAO().getRequestByuID(uID):\n return jsonify(Error=\"No request found\"), 404\n else:\n\n if uID:\n RequestsDAO().deleteRequest(uID)\n return jsonify(User=\"User deleted\"), 200\n else:\n return jsonify(Error=\"Unexpected attributes in update request\"), 400", "def cancel(self) -> None:\n c = self.pgconn.get_cancel()\n c.cancel()", "def test_cancel_shipment_old(self):\n pass", "def cancel(self):\n try: \n self.Scheduler.remove(self)\n except: pass\n self.Scheduler = None", "def cancel_policy(self, cancellation_cause=None, date_cursor=None):\n if not date_cursor:\n date_cursor = datetime.now().date()\n if not cancellation_cause:\n cancellation_cause = \"Policy was cancelled on demand\"\n self.policy.status = u'Canceled'\n self.policy.cancellation_date = date_cursor\n self.policy.status_info = cancellation_cause\n\n # mark all policy's invoices deleted ??\n\n db.session.commit()", "def cancel_game(self, request):\n game = get_by_urlsafe(request.urlsafe_game_key,Game)\n if not game:\n raise endpoints.NotFoundException('A Game with that key does not exist!')\n if game.game_over:\n raise endpoints.ForbiddenException('Game has ended.')\n else:\n game.key.delete()\n return StringMessage(message = 'Game Cancelled!')", "def cancel_instance(self, instance_id):\r\n return self.guest.deleteObject(id=instance_id)", "def delete_reservation(request, reservation_number):\n if not request.user.is_superuser:\n messages.error(request, \"Sorry, you don't have access to this \\\n part of the site.\")\n return redirect(reverse('home'))\n \n reservation = get_object_or_404(Reservation,\n reservation_number=reservation_number)\n reservation.delete()\n messages.info(request, f'Reservation with reservation number {reservation_number}\\\n has been successfully deleted.')\n return redirect('view_reservations')", "async def cancel(self, ctx):\n author: User = ctx.user_object\n\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n try:\n task = adv.get_adventure(ctx.author.id)\n\n adventureid = task[0]\n if adventureid == '0':\n if author.has_item_by_item(REAPER_TOKEN):\n author.update_inventory(REAPER_TOKEN, remove=True)\n adv.remove(ctx.author.id)\n out = 'Slayer task cancelled!'\n else:\n out = 'Error: You do not have a reaper token.'\n elif adventureid == '1':\n adv.remove(ctx.author.id)\n out = 'Killing session cancelled!'\n elif adventureid == '2':\n adv.remove(ctx.author.id)\n out = 'Quest cancelled!'\n elif adventureid == '3':\n adv.remove(ctx.author.id)\n out = 'Gather cancelled!'\n elif adventureid == '4':\n adv.remove(ctx.author.id)\n 
out = 'Clue scroll cancelled!'\n elif adventureid == '5':\n adv.remove(ctx.author.id)\n out = 'Reaper task cancelled!'\n elif adventureid == '6':\n adv.remove(ctx.author.id)\n out = 'Runecrafting session cancelled!'\n else:\n out = f'Error: Invalid Adventure ID {adventureid}'\n\n except NameError:\n out = 'You are not currently doing anything.'\n await ctx.send(out)", "def cancel(self, cr, uid, ids, notes='', context=None):\n notes = \"\"\n u = self.browse(cr, uid, ids)[0].user_id.name\n notes = notes +'\\n'+'Enviroment And Safety Archive Cancelled at : '+time.strftime('%Y-%m-%d') + ' by '+ u \n self.write(cr, uid, ids, {'state':'cancel','notes':notes})\n return True", "def _cancel(self, __button=None):\r\n\r\n self.destroy()", "def cancel(self):\n self.on_cancel()", "async def test_cancel_booking(client):\n headers = { \n 'Authorization': 'Bearer special-key',\n }\n response = await client.request(\n method='DELETE',\n path='/vms/api/v1/bookings/{booking_id}'.format(booking_id='booking_id_example'),\n headers=headers,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def _generate_cancel_method(scheduler_name, scheduler):\n def cancel():\n \"\"\"\n Cancel the task.\n :return:\n \"\"\"\n scheduler.remove(scheduler_name)\n\n return cancel", "def test_cancel_booking(self):\n date = datetime(2060, 3, 1, 11)\n\n booking = create_test_booking(self.user, date, date.hour)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'cancel': str(date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)\n self.assertEqual(type(context[\"info\"]), CancellationAlert)\n\n for week in bookings:\n for row in week.rows:\n for block in row.blocks:\n self.assertEqual(type(block), BlockAvailable)" ]
[ "0.7251533", "0.6900359", "0.6353264", "0.63135654", "0.6305536", "0.6305536", "0.6305536", "0.6251353", "0.6242757", "0.6192752", "0.61905634", "0.6167782", "0.6162976", "0.61334383", "0.6130032", "0.6127152", "0.61173344", "0.61173344", "0.6115619", "0.6088514", "0.60856", "0.60571074", "0.6044834", "0.6020202", "0.6018942", "0.6014244", "0.60001314", "0.5993796", "0.5993796", "0.59929955", "0.59909916", "0.5968488", "0.5965468", "0.5963357", "0.5960993", "0.5960993", "0.5960993", "0.5960993", "0.59538", "0.59376276", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5919277", "0.5899546", "0.58914834", "0.5861485", "0.5845266", "0.58402866", "0.58389", "0.5838662", "0.58369493", "0.58366233", "0.57928854", "0.5790516", "0.57901424", "0.57834774", "0.57745576", "0.5768668", "0.5767253", "0.5762071", "0.5755747", "0.575446", "0.57354003", "0.57319707" ]
0.80371726
0
Add a new reservation
async def add_reservation_endpoint(request): hotel_id = request.args["hotel_id"][0] room_type = request.args["room_type"][0] arrival_date = request.args["arrival_date"][0] departure_date = request.args["departure_date"][0] status = request.args["status"][0] reservation_id = model.add_reservation(hotel_id, room_type, arrival_date, departure_date, status) if reservation_id == model.OPERATION_ERROR_RETURN_CODE: return json({"success": False}) return json({"success": True, "reservation_id": reservation_id})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reservation_add(token_user):\n if not json_param_exists('team_id') or \\\n not json_param_exists('room_id') or \\\n not json_param_exists('start') or \\\n not json_param_exists('end'):\n abort(400, 'one or more required parameter is missing')\n\n team_id = request.json['team_id']\n team = Team.query.get(team_id)\n if team is None:\n abort(400, 'invalid team id')\n\n if not (token_user.has_permission('reservation.create') and team.has_member(token_user)):\n abort(403)\n\n room_id = request.json['room_id']\n room = Room.query.get(room_id)\n if room is None:\n abort(400, 'invalid room id')\n\n start = parse_datetime(request.json['start'])\n end = parse_datetime(request.json['end'])\n if start is None or end is None:\n abort(400, 'cannot parse start or end date')\n\n if start >= end:\n abort(400, \"start time must be before end time\")\n\n res = Reservation(team=team, room=room, created_by=token_user,\n start=start, end=end)\n\n attempt_override = False\n if json_param_exists(\"override\") and isinstance(request.json[\"override\"], bool):\n attempt_override = request.json[\"override\"]\n\n conflict_status, conflicting_reservations = res.validate_conflicts()\n if conflict_status == Reservation.NO_CONFLICT:\n pass\n elif conflict_status == Reservation.CONFLICT_OVERRIDABLE:\n if attempt_override:\n # Delete conflicting reservations\n for conflict in conflicting_reservations:\n get_db().delete(conflict)\n else:\n return json.dumps({\"overridable\": True}), 409\n elif conflict_status == Reservation.CONFLICT_FAILURE:\n return json.dumps({\"overridable\": False}), 409\n\n get_db().add(res)\n get_db().commit()\n\n return '', 201", "def create_new_reservation():\n if not request.json:\n return jsonify({'error': 'no body supplied'}), 400\n\n # look up by date to see if any availability\n res_date = request.json.get('date', None)\n if not res_date:\n error = 'no reservation date supplied'\n flash(error, 'error')\n return jsonify({'error': error}), 400\n\n # check if res time present, if found, convert to DT object\n res_time = request.json.get('time', None)\n if not res_time:\n error = 'no reservation time supplied'\n flash(error, 'error')\n return jsonify({'error': error}), 400\n res_time = time_str_to_obj(res_time)\n\n open_inventory = session.query(Inventory).filter_by(date=res_date).all()\n if not open_inventory:\n error = 'no open inventory for date {}'.format(res_date)\n flash(error, 'error')\n return jsonify({'error': error})\n\n error = 'reservation invalid'\n for inv in open_inventory:\n for window in inv.windows:\n if window.current_res_count < window.max_res_count:\n # check if res date falls in current window\n window_start = time_str_to_obj(window.start_time)\n window_end = time_str_to_obj(window.end_time)\n\n # if requested res time is valid, update res count and save res\n if window_start <= res_time < window_end:\n window.current_res_count = window.current_res_count + 1\n session.add(window)\n\n res = Reservation(**request.json)\n session.add(res)\n resp = session.commit()\n if not resp:\n # send message to flask for creation by name\n flash('reservation for {} created'.format(request.json.get('name')), 'success')\n return jsonify({'message': 'reservation for {} created'.format(request.json.get('name'))})\n else:\n error = 'requested reservation time is not available in current inventory'\n else:\n error = 'current inventory window cannot accept additional reservations, please select different time'\n flash(error, 'error')\n return jsonify({'error': error}), 400", "def 
reservation(self, reservation):\n\n self._reservation = reservation", "def add_new_arrival(self):\n pass", "def create_reservation(self, gs_id, vehicle_id, user_id):\n\n # create the reservation\n reservation = Reservation(self.settings, gs_id, vehicle_id, user_id)\n status, model = reservation.create()\n\n # return status\n if status:\n json_res = model.to_json()\n return True, json_res\n else:\n return False, None", "def validate_and_save(self, reservation, form):\n if not reservation.validate():\n context_data = self.get_context_data(reservation=reservation)\n context_data[\"error\"] = self.get_error_message(form, reservation)\n return render(self.request, self.template_name, context_data)\n\n reservation.save()\n return redirect(calendar_url_reservation(reservation))", "def add_booking(user_id, rest_id, number_of_people, booking_datetime, table_id, entrance_datetime=None):\r\n try:\r\n booking = Booking()\r\n booking.restaurant_id = rest_id\r\n booking.user_id = user_id\r\n booking.booking_datetime = booking_datetime\r\n booking.entrance_datetime = entrance_datetime\r\n booking.number_of_people = number_of_people\r\n booking.table_id = table_id\r\n booking.datetime = datetime.datetime.now()\r\n db.session.add(booking)\r\n db.session.commit()\r\n return booking.id\r\n except:\r\n db.session.rollback()\r\n return None", "def save_car_reservation(car_id, username, date_from, date_to):\n car = get_car_identified_by_id(car_id)\n price = calc_total_price(car.price, date_from, date_to)\n session = start_session()\n new_car_reservation = CarReservation(car_id, username, date_from, date_to, price)\n session.add(new_car_reservation)\n session.commit()\n queryset = session.query(CarReservation).filter(and_(CarReservation.id_car.__eq__(car_id),\n CarReservation.id_user.__eq__(username),\n CarReservation.date_from.__eq__(date_from),\n CarReservation.date_to.__eq__(date_to),\n CarReservation.price.__eq__(price)))\n reservation = queryset2list(queryset)[0]\n session.close()\n return reservation.id_reservation", "def create_reservations(payload, user_id):\n error = False\n # get posted data from json request\n body = request.get_json()\n keys = body.keys()\n # if request does not have json body, abort 400\n if body is None:\n abort(400)\n # if json does not have key 'auth0_id', abort 400\n if 'auth0_id' not in keys:\n abort(400)\n # if json does not have key 'reservation', abort 400\n if 'reservations' not in keys:\n abort(400)\n # if auth0_id in body does not match auth0_id in payload, abort 401\n if body['auth0_id'] != payload['sub']:\n abort(401)\n\n # query who is accessing\n access_user = User.query.filter_by(auth0_id=payload['sub']).first()\n # check if user_id in URL matches the access user id\n if user_id != access_user.id:\n raise AuthError({\n 'code': 'Invalid_claims',\n 'description': 'Unauthorized access by user'\n }, 401)\n\n # query clothes and store them in variable \"clothes\"\n if not isinstance(body['reservations'], list):\n abort(400)\n for value in body['reservations']:\n if not isinstance(value, int):\n abort(400)\n # check if all clothes indeed exist\n clothes = []\n for clothes_id in body['reservations']:\n # query clothes\n selection = Clothes.query.get(clothes_id)\n if selection is None:\n abort(404)\n # if that clothes has been already reserved, abort 422\n if selection.status == \"reserved\":\n abort(422)\n clothes.append(selection)\n\n # query user\n user = User.query.get(user_id)\n formatted_user = user.format()\n\n # make reservations\n try:\n reservations = []\n 
formatted_clothes = []\n for item in clothes:\n new_reservation = Reserve()\n new_reservation.user = user\n new_reservation.clothes = item\n item.status = \"reserved\"\n reservations.append(new_reservation)\n # commit these reservations\n for reservation in reservations:\n reservation.insert()\n formatted_clothes.append(reservation.clothes.format())\n except Exception:\n # rollback all sessions\n for reservation in reservations:\n reservation.rollback()\n error = True\n print(sys.exc_info())\n finally:\n # close all sessions\n for reservation in reservations:\n reservation.close_session()\n\n if error:\n abort(422)\n else:\n return jsonify({\n 'success': True,\n 'clothes': formatted_clothes,\n 'user': formatted_user\n })", "def save(self, *args, **kwargs):\n if not self.pk:\n self.start_time_rent = datetime.date.today()\n self.end_time_rent = self.start_time_rent + datetime.timedelta(days=7)\n self.reservation.isrented = True\n self.reservation.save()\n return super(Rental, self).save(*args, **kwargs)", "def add_booking():\n try:\n \n carid = request.form[\"carid\"]\n userid = request.form[\"userid\"]\n fromdate = request.form[\"fromdate\"].strip()\n todate = request.form[\"todate\"].strip()\n\n print(fromdate, \"|\", todate)\n\n car = Car.query.get(carid)\n car.isavailable = False\n\n user = User.query.get(userid)\n user_email = user.email\n\n fromdate_obj = datetime.datetime.strptime(fromdate, '%Y-%m-%d')\n todate_obj = datetime.datetime.strptime(todate, '%Y-%m-%d')\n \n summary = \"Car Booking. Car id: \" + carid\n\n cal = CalendarUtil()\n resp = cal.addToCalendar(user_email, fromdate_obj, todate_obj, summary)\n cal_event_id = resp['id']\n booking = Booking(carid=carid, userid=userid, fromdate=fromdate, todate=todate, caleventid= cal_event_id, isactive=True)\n\n test = db.session.add(booking)\n db.session.commit()\n return bookingSchema.jsonify(booking)\n except Exception as ex:\n print(\"Failed to add event to calender. 
Exception: \", str(ex))\n return jsonify(None)", "def add_ip_reservation(self, body, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async'):\n return self.add_ip_reservation_with_http_info(body, **kwargs)\n else:\n (data) = self.add_ip_reservation_with_http_info(body, **kwargs)\n return data", "def reservation_update(token_user, res_id):\n if not json_param_exists('room_id') or \\\n not json_param_exists('start') or \\\n not json_param_exists('end'):\n abort(400, 'one or more required parameter is missing')\n\n room_id = request.json['room_id']\n room = Room.query.get(room_id)\n if room is None:\n abort(400, 'invalid room id')\n\n start = parse_datetime(request.json['start'])\n end = parse_datetime(request.json['end'])\n if start is None or end is None:\n abort(400, 'cannot parse start or end date')\n\n res = Reservation.query.get(res_id)\n if res is None:\n abort(400, 'invalid reservation id')\n\n if not token_user.has_permission('reservation.update.elevated'):\n is_my_reservation = any(map(lambda m: m.id == token_user.id,\n res.team.members))\n if not (is_my_reservation and\n token_user.has_permission('reservation.update')):\n abort(403, 'insufficient permissions to update reservation')\n\n res.room = room\n res.start = start\n res.end = end\n\n attempt_override = False\n if json_param_exists(\"override\") and isinstance(request.json[\"override\"], bool):\n attempt_override = request.json[\"override\"]\n\n conflict_status, conflicting_reservations = res.validate_conflicts()\n if conflict_status == Reservation.NO_CONFLICT:\n pass\n elif conflict_status == Reservation.CONFLICT_OVERRIDABLE:\n if attempt_override:\n # Delete conflicting reservations\n for conflict in conflicting_reservations:\n get_db().delete(conflict)\n else:\n return json.dumps({\"overridable\": True}), 409\n elif conflict_status == Reservation.CONFLICT_FAILURE:\n return json.dumps({\"overridable\": False}), 409\n\n get_db().commit()\n\n return '', 204", "def add_reservation(self, src, dst,duration, bandwidth):\n \n # locks the self.current_reservations data structure. This is done\n # because there is a thread that could access it concurrently.\n with self.update_lock:\n\n # PART 1, TASK 3.4 check if there is an existing reservation for (src,dst). \n # you can use the self.current_reservations dictionary to check it.\n # If the reservation exists get the path and bw and update the links capacity \n # data structure using `self.add_link_capacity(path, bw)`\n \n # PART 1, TASK 3.1. 
Once get_available_path is implemented call it to get a path.\n path = self.get_available_path(src, dst, bandwidth)\n\n # PART 1, TASK 3.2 If there is an available path \n if path: \n pass\n # PART 1, TASK 3.2 Get mpls stack of labels\n\n # PART 1, TASK 3.3 get:\n # 1) ingress switch name\n # 2) action name using `mpls_ingress_x_hop` set x as number of labels\n # 3) src and dst ips (your match)\n # 4) make sure all your labels are strings and use them as action parameters\n\n # PART 1, TASK 3.4\n\n # check if its a new or an existing reservation (to update)\n\n # add entry or modify\n # PART 2 TASK 1.4 Configure the associated meter properly.\n\n # update controllers data structures: self.current_reservation & self.links_capacity\n \n\n # PART 1, TASK 3.2 otherwise we print no path available\n else:\n # PART 1, task 4.3 if we dont find a path but the reservation existed\n # you have to erase it while making sure you update links_capacity accordingly \n print(\"\\033[91mRESERVATION FAILURE: no bandwidth available!\\033[0m\")", "def save(self, *args, **kwargs):\n if not self.pk:\n self.start_time_booking = datetime.date.today()\n self.end_time_booking = self.start_time_booking + datetime.timedelta(days=5)\n self.cars.quantity -= 1\n self.cars.save()\n return super(Reservation, self).save(*args, **kwargs)", "def form_valid(self, form, **kwargs):\n reservation = Reservation(start_time=form.cleaned_data[\"start_time\"],\n end_time=form.cleaned_data[\"end_time\"], user=self.request.user,\n machine=form.cleaned_data[\"machine\"], comment=form.cleaned_data[\"comment\"])\n\n if form.cleaned_data[\"event\"]:\n reservation.event = form.cleaned_data[\"event\"]\n\n if form.cleaned_data[\"special\"]:\n reservation.special = True\n reservation.special_text = form.cleaned_data[\"special_text\"]\n\n return self.validate_and_save(reservation, form)", "def schedule_reservation(reservation_date,reservation_time,party_size,restaurant_name,first_name,restaurant_address):\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('calendar', 'v3', credentials=creds)\n\n # Call the Calendar API\n now = datetime.datetime.utcnow()\n\n reservation_day=reservation_date.split('/')[0]\n reservation_month =reservation_date.split('/')[1]\n reservation_year =reservation_date.split('/')[2]\n reservation_date = reservation_year+'-'+reservation_month+'-'+reservation_day\n start_time_hr= reservation_time[:2]\n end_time_hr= int(reservation_time[:2])+4\n start_time_min= reservation_time[2:]\n end_time_min=start_time_min\n \n \n event = {\n 'summary': 'Reservation at '+restaurant_name,\n 'location': restaurant_address,\n 'description': 'Reservation for '+party_size+' under '+first_name+' made on '+str(now),\n 'start': {\n 'dateTime': reservation_date+'T'+start_time_hr+':'+start_time_min+':00+08:00',\n 'timeZone': 'Asia/Singapore',\n },\n 'end': {\n 
'dateTime': reservation_date+'T'+str(end_time_hr)+':'+end_time_min+':00+08:00',\n 'timeZone': 'Asia/Singapore',\n },\n 'reminders': {\n 'useDefault': False,\n 'overrides': [\n {'method': 'email', 'minutes': 24 * 60},\n {'method': 'popup', 'minutes': 10},\n ],\n },\n }\n\n event = service.events().insert(calendarId='primary', body=event).execute()\n print ('Event created: %s', (event.get('htmlLink')))", "def reservs(request):\n a = request.GET\n print(a)\n if request.method == 'POST':\n # create a form\n form = NewReservationsOfficesForm(data=request.POST)\n if form.is_valid():\n form.save()\n return redirect('coworkings:index')\n else:\n form = NewReservationsOfficesForm()\n\n context = {\"form\": form}\n return render(request, 'coworkings/reservs.html', context)", "def addToReservation():\n\n def fits(x, y):\n \"\"\"\n Check if a job shape's resource requirements will fit within a given node allocation\n \"\"\"\n return y.memory <= x.memory and y.cores <= x.cores and y.disk <= x.disk\n\n def subtract(x, y):\n \"\"\"\n Adjust available resources of a node allocation as a job is scheduled within it.\n \"\"\"\n return Shape(x.wallTime, x.memory - y.memory, x.cores - y.cores, x.disk - y.disk)\n\n def split(x, y, t):\n \"\"\"\n Partition a node allocation into two\n \"\"\"\n return (Shape(t, x.memory - y.memory, x.cores - y.cores, x.disk - y.disk),\n NodeReservation(Shape(x.wallTime - t, x.memory, x.cores, x.disk)))\n\n i = 0 # Index of node reservation\n while True:\n # Case a new node reservation is required\n if i == len(nodeReservations):\n x = NodeReservation(subtract(nodeShape, jS))\n nodeReservations.append(x)\n t = nodeShape.wallTime\n while t < jS.wallTime:\n y = NodeReservation(x.shape)\n t += nodeShape.wallTime\n x.nReservation = y\n x = y\n return\n\n # Attempt to add the job to node reservation i\n x = nodeReservations[i]\n y = x\n t = 0\n \n while True:\n if fits(y.shape, jS):\n t += y.shape.wallTime\n \n # If the jS fits in the node allocation from x to y\n if t >= jS.wallTime:\n t = 0\n while x != y:\n x.shape = subtract(x.shape, jS)\n t += x.shape.wallTime\n x = x.nReservation\n assert x == y\n assert jS.wallTime - t <= x.shape.wallTime\n if jS.wallTime - t < x.shape.wallTime:\n x.shape, nS = split(x.shape, jS, jS.wallTime - t)\n nS.nReservation = x.nReservation\n x.nReservation = nS\n else:\n assert jS.wallTime - t == x.shape.wallTime\n x.shape = subtract(x.shape, jS)\n return \n \n # If the job would fit, but is longer than the total node allocation\n # extend the node allocation\n elif y.nReservation == None and x == nodeReservations[i]:\n # Extend the node reservation to accommodate jS\n y.nReservation = NodeReservation(nodeShape)\n \n else: # Does not fit, reset\n x = y.nReservation\n t = 0\n \n y = y.nReservation\n if y is None:\n # Reached the end of the reservation without success so stop trying to\n # add to reservation i\n break\n i += 1", "def add_new_item(self, request, *a, **kw):\n item_def = request.data\n cpdoc = self.get_object()\n item_def['calendar_plan'] = cpdoc.id\n\n item_ser = self.get_serializer(data=item_def)\n item_ser.is_valid(raise_exception=True)\n item_obj = item_ser.save()\n\n headers = self.get_success_headers(item_ser.data)\n return response.Response(item_ser.data, headers=headers)", "def post(self, flight_id):\n data = request.get_json()\n seat = 1\n if data:\n seat = data.get('seat')\n current_user = get_jwt_identity()\n try:\n flight = get_flight(flight_id)\n if not flight:\n return generate_response('Selected flight not available', 400)\n\n 
if seat == 1 and flight.booked_economy < flight.airplane.economy_seats:\n data = dict(booked_economy=flight.booked_economy+1)\n save_booking(current_user, flight_id)\n flight.update(flight, **data)\n return generate_response('Economy seat flight reservation successfull', 201)\n\n if seat == 2 and flight.booked_business < flight.airplane.business_seats:\n data = dict(booked_business=flight.booked_business+1)\n save_booking(current_user, flight_id)\n flight.update(flight, **data)\n return generate_response('Business seat flight reservation successfull', 201)\n\n except Exception as e:\n db.session.rollback()\n return jsonify({'error': str(e)}), 401", "def insert_reservation(house, id, check_in_date, check_in_time, check_out_date, guest_name,\n guest_cell, guest_telegram, num_guest, comment, confirm):\n sql = \"\"\"INSERT INTO %s VALUES(%s, '%s', '%s', '%s', '%s', '%s', '%s', %s, '%s', %s) RETURNING reservation_id;\"\"\"\n conn = None\n reservation_id = None\n try:\n # read database configuration\n params = config()\n # connect to the PostgreSQL database\n conn = psycopg2.connect(**params)\n # create a new cursor\n cur = conn.cursor()\n # execute the INSERT statement\n print(sql % (house, id, check_in_date, check_in_time, check_out_date, guest_name,\n guest_cell, guest_telegram, num_guest, comment, confirm))\n cur.execute(sql, (house, id, check_in_date, check_in_time, check_out_date, guest_name,\n guest_cell, guest_telegram, num_guest, comment, confirm))\n # get the generated id back\n vendor_id = cur.fetchone()[0]\n # commit the changes to the database\n conn.commit()\n # close communication with the database\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n\n return reservation_id", "def addBooking(self, booking):\n self.bookings.addBooking(booking.getID())", "def reservation(self):\n return self.request.get('reservation', None)", "def SaveToReservationSQL(self, order, recipientid):\n\n # get counter, increase it, save counter, and use for reservation\n # managerid, recipientid,\n # insert reservation \\'{reservation_guid}\\\n # insert people\n # insert subclaim\n\n cursor = self.cursor\n\n reservation_guid = order[\"id\"]\n km_number = order[\"crmid\"]\n cursor.execute('select TOP 1 id from reservation where trash=0 and (guid=? or ndog=?) order by id desc',\n (reservation_guid, km_number))\n\n row = cursor.fetchone()\n if (not row):\n reservation_new = 1\n reservationid = None\n else:\n reservation_new = 0\n reservationid = row[0]\n\n # check subclaims\n # reservation_to_delete=row[0]\n # query='select id from subclaim where claimid=?'\n # cursor.execute(query,reservation_to_delete)\n # rows=cursor.fetchall()\n # if rows :\n # query='select number from reservation where id=?'\n # cursor.execute(query,reservation_to_delete)\n # row = cursor.fetchone()\n # self.number = row[0]\n\n # TODO - update existing reservation\n # return 0\n\n # query='update reservation set trash=1 where id=?'\n # cursor.execute(query,reservation_to_delete)\n\n # create reservation if it is missing\n\n if reservation_new == 0:\n\n cursor.execute('select number from reservation where id=? 
and trash=0', reservationid)\n row = cursor.fetchone()\n number = row[0]\n self.number = number\n\n else:\n number = km_number\n self.number = number\n\n print('Dogovor number ', number, 'KM', km_number, 'reservationid ', reservationid)\n\n manager_guid = order[\"manager\"][\"id\"]\n query = f'select id from recipient where guid=\\'{manager_guid}\\''\n cursor.execute(query)\n row = cursor.fetchone()\n humanid = row[0]\n\n guid = order[\"id\"]\n currency = order[\"cruises\"][0][\"currency\"]\n print(currency)\n\n date_created = datetime.fromisoformat(order[\"created\"][:order[\"created\"].find('.')])\n\n query = '''\ninsert into dbo.[reservation]\n([number], [cdate], [recipientid], [humanid], [officeid], [legalid], [statusid],\n [pdate], [currencyid],[ndog],[guid])\nvalues (?,?,?,?,?,?,?,?,?,?,?)\n'''\n\n # TODO officeid by manager, legalid by owner, statusid?\n ## if reservation is not exist create new, else update\n values = (\n km_number, date_created, recipientid, humanid, 29921, 136, 2, date_created, currencymap[currency],\n order[\"crmid\"],\n guid)\n print(values)\n if (reservation_new == 1) and (km_number):\n cursor.execute(query, values)\n cursor.execute(\"select IDENT_CURRENT('reservation')\")\n row = cursor.fetchone()\n id = row[0]\n cursor.execute('exec ChangesLog_AddNew ?,?,?,?,?,?,?,?,?,?,?,?,?', (\n 'robot python', 1, 'reservation', id, km_number, 'reservation', id, str(id), None, None, '', None, ''))\n\n\n elif (reservation_new == 0) and (km_number):\n update_query = \"\"\" update dbo.[reservation] \n set cdate = ?, recipientid=?, humanid = ?, officeid=?, legalid=?, statusid=?, pdate=?, currencyid=?, guid =?, ndog = ? where id=?\"\"\"\n cursor.execute(update_query, (\n date_created, recipientid, humanid, 29921, 136, 2, date_created, currencymap[currency], guid, km_number,\n reservationid))\n id = reservationid\n else:\n id = 0\n return id, reservation_new", "def __init__(__self__,\n resource_name: str,\n args: Optional[ReservationArgs] = None,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def reservation(self):\n return self._reservation", "def reservation_mark_entrance(user: User, reservation: Reservation):\n owns_restaurant = reservation.restaurant.operator == user\n if owns_restaurant and reservation.status is ReservationState.ACCEPTED and reservation.reservation_time <= datetime.datetime.now():\n #Might want to add user notification\n reservation.entrance_time = datetime.datetime.now()\n reservation.status = ReservationState.SEATED\n db.session.commit()\n return True\n\n return False", "def form_valid(self, form, **kwargs):\n reservation = kwargs[\"reservation\"]\n # The user is not allowed to change the machine for a reservation\n if reservation.machine != form.cleaned_data[\"machine\"]:\n return redirect(\"my_reservations\")\n\n reservation.comment = form.cleaned_data[\"comment\"]\n\n reservation.start_time = form.cleaned_data[\"start_time\"]\n reservation.end_time = form.cleaned_data[\"end_time\"]\n if reservation.event:\n reservation.event = form.cleaned_data[\"event\"]\n\n if reservation.special:\n reservation.special_text = form.cleaned_data[\"special_text\"]\n\n return self.validate_and_save(reservation, form)", "def room_add():\n if not json_param_exists('number'):\n abort(400, 'invalid room number')\n\n if not isinstance(request.json['number'], str):\n abort(400, 'room number must be string')\n\n num = request.json['number']\n room = Room(number=num)\n\n try:\n get_db().add(room)\n get_db().commit()\n except IntegrityError:\n abort(409, 'room number 
is already in use')\n return json.dumps(room.as_dict(include_features=False)), 201", "async def add_inventory_endpoint(request):\n hotel_id = request.args[\"hotel_id\"][0]\n room_type = request.args[\"room_type\"][0]\n room_inventory = request.args[\"room_inventory\"][0]\n model.add_inventory(hotel_id, room_type, room_inventory)\n return json({\"success\": True})", "def create_new_availability():\n if request.method == 'POST':\n doctor_email = request.form['doctor_email']\n date = request.form['date']\n start_time = request.form['start_time']\n end_time = request.form['end_time']\n\n response_add_availability = requests.post(server_url + 'doctor/add_availability', json={\n 'doctor_email': doctor_email,\n 'date': date,\n 'start_time': start_time,\n 'end_time': end_time\n })\n response_add_availability = response_add_availability.json()\n\n if response_add_availability.get('Status') == \"ALREADY_AVAILABILITY_SET\":\n return render_template('doctors/availability_failed.html')\n else:\n referer = request.referrer\n return render_template('doctors/availability_success.html', referer=referer)\n else:\n return render_template('doctors/dashboard.html')", "def create_booking(self, request):\n model = AssignmentModelFromDynamo('assignment', 'params')\n\n model.save()\n\n return model", "def test_reservation_id_one_instance(self):\n (refs, resv_id) = self.compute_api.create(self.context,\n self.default_flavor,\n image_href=uuids.image_href_id)\n self.assertEqual(len(refs), 1)\n self.assertEqual(refs[0]['reservation_id'], resv_id)", "async def add_hotel_endpoint(request):\n hotel_name = request.args[\"hotel_name\"][0]\n hotel_id = model.add_hotel(hotel_name)\n return json({\"hotel_id\": hotel_id})", "def add_event():\n\n business = request.form.get('bus_name')\n name_evt = request.form.get('name_evt')\n\n start = request.form.get('start')\n end = request.form.get('end')\n description = request.form.get('description')\n\n #TODO might run into service option problems\n # service = request.form.get('service')\n\n #business = get bus_id from session?\n\n # new_evt = crud.create_event(name_evt, start, end, description, service, business)\n\n # return redirect('/')\n \n return render_template('add_evts.html')", "def confirm_car_reservation():\n session_id = request.args.get('session-id', None)\n user_id = request.args.get('user-id', None)\n today = datetime.date.today()\n if request.method == 'POST':\n car_id = request.form['hidden-car-id']\n car = get_car_identified_by_id(car_id)\n date_from = request.form['hidden-date-from']\n date_to = request.form['hidden-date-to']\n if not are_dates_valid(date_from, date_to):\n if check_authentication(session_id, user_id):\n return render_template('car_details.html', car=car, error=\"Please insert a valid date interval!\",\n user=user_id, session_id=session_id, today=today)\n else:\n return render_template('car_details.html', car=car, error=\"Please insert a valid date interval!\", today=today)\n if is_car_available_in_the_selected_period(date_from, date_to, car_id):\n if check_authentication(session_id, user_id):\n if has_user_age_requirement(user_id, car_id):\n reservation_id = save_car_reservation(car_id, user_id, date_from, date_to)\n return render_template('car_reservation_details.html', user=user_id, session_id=session_id,\n reservation_id=reservation_id, car=car, date_from=date_from, date_to=date_to,\n total_price=calc_total_price(car.price, date_from, date_to),\n reservation_just_completed=True)\n else:\n error_msg = \"The reservation has failed because you are 
not at least \" + str(car.min_age) +\\\n \" years old!\"\n return render_template('car_details.html', user=user_id, session_id=session_id,\n error=error_msg, car=car, today=today)\n else:\n return render_template('car_details.html', car=car,\n error=\"You need to be authenticated in order to complete this action!\", today=today)\n else:\n if check_authentication(session_id, user_id):\n return render_template('car_details.html', car=car, is_available=False, show_confirm_div=True,\n date_from=date_from, date_to=date_to, user=user_id, session_id=session_id, today=today)\n else:\n return render_template('car_details.html', car=car, is_available=False, show_confirm_div=True,\n date_from=date_from, date_to=date_to, today=today)\n else:\n if check_authentication(session_id, user_id):\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), user=user_id,\n session_id=session_id, authjs=False, preview_length=get_cars_preview().__len__())\n else:\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), authjs=False,\n preview_length=get_cars_preview().__len__(), del_session_cookie=True)", "def booking(self, customer, room):\n self.room[room] = customer\n return True", "def register_plane_ticket(self,\n origin, \n destination, \n departure_date, \n arrival_date, \n number_of_rooms, \n only_departure,\n guest_ages):\n\n self.client.service.registerPlaneTicket(origin, destination, departure_date[0], departure_date[1], departure_date[2],\n arrival_date[0], arrival_date[1], arrival_date[2], number_of_rooms, only_departure,\n guest_ages)", "def add_task(self):\n print('Enter task')\n text_task = input()\n print('Enter deadline')\n new_task = self.Table(task=text_task, deadline=datetime.strptime(input(), '%Y-%m-%d'))\n self.session.add(new_task)\n self.session.commit()\n print('The task has been added!')\n print()", "def post(self, current_user):\n data = request.json\n origin = data['origin']\n destination = data['destination']\n date = data['date']\n \n ride = Ride(origin=origin, destination=destination, date=date)\n try:\n all_rides = ride.fetch_all()\n for this_ride in all_rides:\n if this_ride['origin'] == ride.origin and this_ride['destination'] == ride.destination and this_ride['date'] == ride.date and this_ride['driver'] == current_user[2]:\n response = {\n 'message': 'This ride already exists.',\n }\n return make_response(jsonify(response)), 202\n driver = current_user[2]\n ride.insert(driver)\n\n response = {\n 'message': 'You offered a ride successfully.',\n }\n return make_response(jsonify(response)), 201\n\n except Exception as e:\n response = {\n 'message': str(e)\n }\n return make_response(jsonify(response)), 500", "def on_add_clicked(self):\n selected_indexes = self.ui.availListView.selectedIndexes()\n for index in selected_indexes:\n row = self.availModel.itemFromIndex(index).row()\n #rowList = self.availModel.takeRow(row)\n student = self.availModel.item(row, 0).text()\n sid = self.availModel.item(row, 1).text()\n try:\n # Actually add the student for the date into the database\n self.db.student_attend(sid, self.date_string)\n except KeyError:\n # Display error window if student missing\n err_msg = QtGui.QErrorMessage()\n err_msg.showMessage(\"Sid not found for student %s\" % student)\n\n self.update_views()", "def post(self):\n args = self.reqparse.parse_args()\n check_for_empty_fields(args)\n car = Car(args['registration'],\n args['model'], args['capacity'])\n return car.add()", "def add_event(title, 
release_date, rating = None):\n store = file.Storage('token.json')\n creds = store.get()\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets('credentials.json', SCOPES)\n creds = tools.run_flow(flow, store)\n service = build('calendar', 'v3', http=creds.authorize(Http()))\n # Call the Calendar API\n event = service.events().insert(calendarId='primary', body={\n 'summary': '%s Movie' % title,\n 'description': rating,\n 'backgroundColor': 'Tomato',\n 'foregroundColor': 'Tomato',\n 'colorId': 11,\n 'start': {\n 'dateTime': (release_date + relativedelta(hours=16)).isoformat() + 'Z',\n 'timeZone': 'America/Chicago',\n },\n 'end': {\n 'dateTime': (release_date + relativedelta(hours=17)).isoformat() + 'Z',\n 'timeZone': 'America/Chicago',\n },\n 'recurrence': [\n 'RRULE:FREQ=DAILY;COUNT=1'\n ],\n 'reminders': {\n 'useDefault': 'useDefault',\n 'overrides': [\n {'method': 'email', 'minutes': 24 * 60},\n {'method': 'popup', 'minutes': 10}\n ],\n },\n }).execute()", "def add_vehicle():\n form = VehicleForm()\n if form.validate_on_submit():\n try:\n new_info = movr.add_vehicle(vehicle_type=form.vehicle_type.data,\n longitude=form.longitude.data,\n latitude=form.latitude.data,\n battery=form.battery.data)\n except IntegrityError as e:\n return render_error_page(e, movr)\n vehicle_id = new_info['vehicle_id']\n\n # check to verify that vehicle was added\n new_vehicle = movr.get_vehicle(vehicle_id)\n if new_vehicle is None: # Insert didn't work\n flash((\"Vehicle with id `{}` \"\n \"NOT successfully added. Edit add_vehicle_txn in \"\n \"movr/transactions.py to add the vehicle to the database.\"\n ).format(vehicle_id))\n redirect(url_for('add_vehicle', _external=True))\n else: # Inserted vehicle was found\n flash('Vehicle added! \\nid: {}'.format(vehicle_id))\n return redirect(\n url_for('vehicle', vehicle_id=vehicle_id, _external=True))\n\n # form not properly filled out yet\n return render_template('add_vehicle.html',\n title='Add a vehicle',\n form=form)", "def _MakeCreateRequest(args, messages, resources, project,\n future_reservation_ref):\n future_reservation = util.MakeFutureReservationMessageFromArgs(\n messages, resources, args, future_reservation_ref)\n future_reservation.description = args.description\n future_reservation.namePrefix = args.name_prefix\n\n return messages.ComputeFutureReservationsInsertRequest(\n futureReservation=future_reservation,\n project=project,\n zone=future_reservation_ref.zone)", "def command_add(date, start_time, end_time, title, calendar):\n if is_calendar_date(date) and all([(i in range(0, 24)) for i in (start_time, end_time)]) and start_time <= end_time and is_natural_number(str(start_time)) and is_natural_number(str(end_time)):\n event = {\n \"start\": start_time,\n \"end\": end_time,\n \"title\": title,\n }\n if calendar.get(date) is None:\n calendar[date] = [event]\n else:\n calendar[date].insert(0, event)\n # calendar[date].append(event)\n return True\n return False", "def post(self, request): # FIRST EXAMPLE\n model = self._create_booking(\n request=request) # when _create_booking is invoked, historio Client will log model\n print('save me')", "def add_investment():\n\n company_name = request.args.get('company-name')\n date_of_entry = datetime.datetime.today().strftime('%Y-%m-%d')\n \n input_quantity = request.args.get('quantity')\n quantity = int(str(input_quantity).replace(',', ''))\n \n input_cost = request.args.get('cost')\n cost = int(str(input_cost).replace(',', ''))\n\n date_of_investment = request.args.get('date')\n\n new_inv = 
Investment(date_of_entry=date_of_entry, \n date_of_investment=date_of_investment,\n company_name=company_name, \n quantity=quantity, \n cost=cost)\n \n db.session.add(new_inv)\n db.session.commit()\n\n user_id = session['user']\n new_inv_id = new_inv.inv_id\n\n\n new_userinv = UserInv(inv_id=new_inv_id,\n user_id=user_id)\n db.session.add(new_userinv)\n db.session.commit()\n\n return jsonify('investment added!')", "def extend_reservation(self, key):\n\t\tif self.client is None:\n\t\t\traise UsageError(\"Not connected!\")\n\t\treturn self.client.extend_reservation(key)", "def reservation_data(self):\n reservations = []\n\n for reservation in self.reservations():\n resource = utils.get_resource_by_uuid(reservation.resource)\n\n if resource is None:\n log.warn('Invalid UUID %s' % str(reservation.resource))\n continue\n\n resource = resource.getObject()\n\n data = {}\n\n data['title'] = utils.get_resource_title(resource)\n\n timespans = []\n for start, end in reservation.timespans():\n timespans.append(u'◆ ' + utils.display_date(start, end))\n\n data['time'] = '<br />'.join(timespans)\n data['quota'] = utils.get_reservation_quota_statement(\n reservation.quota\n ) if reservation.quota > 1 else u''\n\n data['url'] = resource.absolute_url()\n data['remove-url'] = ''.join((\n resource.absolute_url(),\n '/your-reservations?remove=',\n reservation.token.hex\n ))\n reservations.append(data)\n\n return reservations", "def reservation_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"reservation_id\")", "def add_calendar(calendar):\n with session_scope(DBSession) as session:\n session.add(calendar)", "def create_request(self, **kwargs):\n if kwargs['ride_id'] in app.database['Rides']:\n request_ids = [x for x in app.database['Requests']]\n if request_ids:\n request_id = max(request_ids) + 1\n else:\n request_id = 1\n self.new_request = Request(\n request_id=request_id,\n ride_id=kwargs['ride_id'],\n status='available'\n )\n request = self.new_request.__dict__\n app.database['Requests'][request_id] = request\n message = 'Ride request created successfully'\n attributes = {\n 'location': '/api/v1/rides/' + str(request_id) + '/requests'\n }\n response = Response.success(message=message, attributes=attributes)\n return response, 201\n meta = {'errors': 1,\n 'source': '/' + str(kwargs['ride_id']) + '/requests'}\n message = 'NOT FOUND'\n return Response.failed(meta=meta, message='NOT FOUND',\n info='The ride requested does not exist'), 404", "def add_ip_reservation_with_http_info(self, body, **kwargs):\n\n all_params = ['body', 'cookie']\n all_params.append('async')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method add_ip_reservation\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `add_ip_reservation`\")\n\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n\n header_params = {}\n if 'cookie' in params:\n header_params['Cookie'] = params['cookie']\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n 
select_header_accept(['application/oracle-compute-v3+json'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/oracle-compute-v3+json'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api('/ip/reservation/', 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='IPReservationResponse',\n auth_settings=auth_settings,\n async=params.get('async'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def __ui_add_discipline(self):\n discipline_id = input(\"discipline id: \")\n discipline_name = input(\"discipline discipline_name: \")\n\n try:\n self.__discipline_controller.add_discipline(discipline_id, discipline_name)\n print(\"Add successful\\n\")\n except DisciplineException as de:\n print(de)\n return\n except RepositoryException as re:\n print(re)\n return", "def test_save_slot(self):\n business = BUSINESS_FACTORY.create_business()\n slot = Slot.objects.create(site_id=2, business_id=business.id,\n start_date = datetime.date.today(),\n end_date = datetime.date.today() + datetime.timedelta(1))\n LOG.debug(slot)\n self.assertTrue(slot.id)\n self.assertEqual(slot.renewal_rate, 10)\n self.assertEqual(slot.is_autorenew, False)", "def add_car(sefl):\n make = \"Test_Toyota\"\n body_type = \"Seden\"\n colour = \"Black\"\n seats = 5\n location = \"-37.814, 144.96332\"\n cost_per_hour = 10.5\n\n newCar = Car( make = make,\n body_type = body_type,\n colour = colour,\n seats = seats,\n location = location,\n cost_per_hour = cost_per_hour,\n booked = True\n\n )\n db.session.add(newCar)\n db.session.commit()\n self.assertTrue(self.carExists(make))", "def test_reservation_ids_two_instances(self):\n (refs, resv_id) = self.compute_api.create(self.context,\n self.default_flavor,\n image_href=uuids.image_href_id,\n min_count=2, max_count=2)\n self.assertEqual(len(refs), 2)\n self.assertIsNotNone(resv_id)\n for instance in refs:\n self.assertEqual(instance['reservation_id'], resv_id)", "def post_another_try(self, request): # SECOND EXAMPLE\n model = self.create_booking(request)\n client.historio().push(model, get_current_user_id(), source='assignment', source_id=model.id) # Magic happens\n # Magic is done", "def post(self, request):\n data = dict(request.data)\n ser = _CreateScheduleSerializer(data=data)\n if ser.is_valid(raise_exception=False):\n ser.save()\n return send_200(\n data={\"data\": ser.data}, message=\"schedule created/updated successfully\"\n )\n else:\n return send_400(\n status=\"FAILURE\",\n data={\"errors\": ser.errors},\n message=ser.extract_error_msg(),\n )", "def update_reservation(self, reservation_id, gs_id, vehicle_id, user_id):\n\n # find the reservation\n reserv_model = Reservation(self.settings)\n r = reserv_model.find_reservation(reservation_id)\n\n # update the reservation\n status = r.update(gs_id, vehicle_id, user_id)\n\n # find the updated reservation\n r = reserv_model.find_reservation(reservation_id)\n \n # return the model\n json_model = r.to_json()\n return status, json_model", "def crear_registro(request):\n if request.method == 'POST':\n att = AttentionType.objects.get(name=request.POST['tipo_atencion'])\n if request.POST['pin']:\n persona = Persona.objects.get(pin=request.POST['pin'])\n try:\n numero = 
visualizador(request).content\n\n atencion = InitialAttention.objects.get(\n attention_number=numero,\n attention_type=att,\n created__contains=timezone.now().date()\n )\n\n registro_guardado = Registers.objects.create(\n pin=persona,\n attention_number=atencion,\n priority_attention=False,\n attention_type=att,\n start_attention=timezone.now(),\n observations=request.POST['observaciones'] if request.POST['observaciones'] else '',\n finish_attention=timezone.now(),\n tiempo_espera=Decimal(format((timezone.now()-atencion.created).seconds / 60, '.1f')),\n sellplace=SellPlace.objects.get(id_sellplace=1),\n sucursal=Sucursal.objects.get(id_sucursal=1),\n )\n serializer = RegistersSerializer(registro_guardado)\n except ValueError:\n return JSONResponse('No hay turnos para ser Atendidos!', status=400)\n except ObjectDoesNotExist:\n return JSONResponse('No hay turnos para ser Atendidos!', status=400)\n\n return JSONResponse(serializer.data, status=201)", "def add_trip(request):\n trip_name = request.POST.get('trip_name', None)\n start_date_tx = request.POST.get('start_date_tx', None)\n city_id = request.POST.get('city_id', None)\n\n if not trip_name or not start_date_tx or not city_id:\n # incorrect request received\n error_message = \"Missing parameters in request. Send trip_name, city_id, start_date_tx\"\n return Response(error_message, status=status.HTTP_400_BAD_REQUEST)\n\n try:\n city = City.objects.get(pk=city_id)\n trip = Trip(trip_name=trip_name, city=city, start_date_tx=start_date_tx)\n trip.save()\n trip.users.add(request.user)\n except Exception as e:\n error_message = str(e)\n return Response(error_message, status=status.HTTP_400_BAD_REQUEST)\n\n success_message = \"Sucessfully added new trip.\"\n return Response(success_message, status=status.HTTP_201_CREATED)", "def reserve(self, context, deltas, expire=None, project_id=None):\n if not project_id:\n project_id = context.project_id\n reservations = self._driver.reserve(context, self._resources, deltas,\n expire=expire,\n project_id=project_id)\n\n LOG.debug(\"Created reservations %s\", reservations)\n\n return reservations", "def create_candidate(self, data, header):\n return self.client.post(\n path='/api/v2/office/1/register/', data=json.dumps(data), content_type='application/json', headers=header)", "def post(self):\n data = request.json\n return VehicleServices(data=data).save_new_item()", "def add_station(self, station_id=None, time=None, location=None):", "def add_car():\n\n make = request.form[\"make\"]\n bodytype = request.form[\"bodytype\"] \n color = request.form[\"color\"]\n seats = request.form[\"seats\"]\n location = request.form[\"location\"]\n costperhour = request.form[\"costperhour\"]\n\n # create a new Car object.\n new_car = Car(make=make, bodytype=bodytype, color=color, seats=seats, location=location, costperhour=costperhour)\n\n # add new car to db\n db.session.add(new_car)\n # commit the new add.\n db.session.commit()\n\n return carSchema.jsonify(new_car)", "def select_reservation(self, ctx: dataclasses.dataclass) -> ResultE[dataclasses.dataclass]:\n pk = cf.get_int_or_none(ctx.pk) or 0\n if pk <= 0:\n return self._error('Missed Reservation ID', ctx, ReservationErrors.missed_reservation)\n try:\n data = self._reservations_repo.get(pk)\n except Exception as err:\n return self._error(\n f\"Error select Reservation ID={pk} in House ID={ctx.house.id}\", ctx, ReservationErrors.error, exc=err\n )\n if data == Nothing:\n return self._error(\n f\"Unknown Reservation ID={pk} in House ID={ctx.house.id}\", ctx, 
ReservationErrors.missed_reservation\n )\n if hasattr(ctx, 'source'):\n ctx.source = data.unwrap()\n else:\n ctx.reservation = data.unwrap()\n return Success(ctx)", "def add_room(self, room):\n self.rooms.append(room)", "def __ui_add_new_activity(self):\n activity_id = int(input(\"Activity ID: \"))\n existing_persons_ids = self.__person_service.get_existing_persons_ids()\n string_of_participants_ids = input(\n f\"Participants' IDs (you can choose from the list: {existing_persons_ids})\\n > \")\n list_of_participants_ids = self.__ui_convert_ids_string_to_list(string_of_participants_ids)\n activity_description = input(\"Describe the activity: \")\n activity_date = {\n \"year\": int(input(\"Year: \")),\n \"month\": int(input(\"Month: \")),\n \"day\": int(input(\"Day: \"))\n }\n activity_time = int(input(\"Time: \"))\n\n self.__activity_service.service_add_activity(activity_id,\n list_of_participants_ids,\n activity_date,\n activity_time,\n activity_description)\n print(\"Activity successfully added to your agenda!\\n\")", "def set_reservation_name(self, _name):\n self.reservation_holder.set_name(_name)", "def create():\n config = request.data\n return add_scheduling_block(config)", "def create_rental(self, student_id:int, rental_instrument:int, start_date:date, months_to_rent:int):\n try:\n s = start_date\n start_date = self.date_to_strf(s)\n # end_date = \"{}-{:02d}-{:02d}\".format(s.year, s.month + months_to_rent, s.day)\n self.cursor.execute(\"\"\" \n INSERT INTO rental (start_date, end_date, student_id, ri_id)\n VALUES (%s, %s::date + INTERVAL '%s month', %s , %s)\n \"\"\", [start_date, start_date, months_to_rent, student_id, rental_instrument])\n self.db.commit()\n except Exception as e:\n self.db.rollback()\n raise RuntimeError(\"No student found to be able to complete rental.\")", "def test_add_schedule(self):\n body = Schedule()\n response = self.client.open('/v1/schedule',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def reservation_detail(request, reservation_number):\n if not request.user.is_superuser:\n messages.error(request, \"Sorry, you don't have access to this \\\n part of the site.\")\n return redirect(reverse('home'))\n\n amenities = Amenity.objects.all()\n reservation = get_object_or_404(Reservation,\n reservation_number=reservation_number)\n\n messages.info(request, f'This is the confirmation email sent to the guests \\\n after booking for reservation number {reservation_number}.')\n\n template = 'checkout/checkout_success.html'\n context = {\n 'reservation': reservation,\n 'admin': True,\n 'amenities': amenities,\n }\n return render(request, template, context)", "def add_parcel(self):\n\n # we check the request object the user sends to\n # validate it has enough information\n if not request.json():\n return {\"Error\": \"Bad request\"}, 400\n if not request.json['sender']:\n return {\"Error\": \"Please include the sender name\"}, 400\n if not request.json[\"recipient\"]:\n return {\"Error\": \"Please include the recipient\"}, 400\n if not request.json[\"destination\"]:\n return {\"Error\": \"You must specify a destination\"}, 400\n if not request.json[\"weight\"]:\n return {\"Error\": \"You must specify the weight\"}, 400\n if not request.json[\"pickup\"]:\n return {\"Error\": \"What is the pickup location?\"}, 400\n\n data = {\n 'id': len(parcels) + 1,\n 'sender': request.json['sender'],\n 'recipient': request.json['recipient'],\n 'destination': 
request.json['destination'],\n 'weight': request.json['weight'],\n 'pickup': request.json['pickup']\n }\n\n self.db.append(data)\n return {\"Success\": \"Added parcel\" + data}, 201", "def create_race():\n # create a new race and set user_id to current_user\n user_id = current_user.id\n newRace = Race(user_id=user_id, date=datetime.now())\n\n # update database and commit changes\n db.session.add(newRace)\n db.session.commit()\n return redirect(url_for('setup.race_setup', race_id=newRace.id))", "def insertroute(self, new_route):\n route_key = new_route['start'] + \",\" + new_route['finish']\n error, exists, message, code, lines = self.selectroute(route_key)\n if error or exists:\n return False, message, code\n else:\n new_route_line = new_route['start'] + \",\" + new_route['finish'] + \",\" + str(new_route['cost'])\n error, message, code = self.commandroute('Insert', lines, new_route_line)\n if not error:\n return True, message, 201\n else:\n return False, message, code", "def show_reservation(self, reservation_id):\n\n # create an instance of the model\n reserv_model = Reservation(self.settings)\n\n # query the model\n r = reserv_model.find_reservation(reservation_id)\n\n # return the result in a json-ifiable form\n json_reservation = r.to_json()\n\n # return\n print json_reservation\n return json_reservation", "def add_room(self, room):\n self.rooms[room.name] = room", "def _quota_reservations(session, context, reservations):\n\n # Get the listed reservations\n return model_query(context, models.Reservation,\n read_deleted=\"no\",\n session=session).\\\n filter(models.Reservation.uuid.in_(reservations)).\\\n with_lockmode('update').\\\n all()", "def add_entry(self, start_day, start_hour, stop_day, stop_hour, mode, radar=[\"ALL\"]):\n self.entries.append(Entry(self.year, self.month, start_day, start_hour, stop_day, stop_hour, mode, radar))", "def post(self, request):\n age = request.POST['age']\n gender = request.POST['gender']\n occupation = request.POST['occupation']\n zip_code = request.POST['zip_code']\n new_rater = Rater(age=age, gender=gender, occupation=occupation, zip_code=zip_code)\n new_rater.save()\n return HttpResponse(json.dumps(new_rater), content_type='application/json', status=201)", "def new_task(self):\n print \"Create a new task.\"\n\n # Collect new task info from user\n description = raw_input(\"Enter task (140 characters max) > \")\n due_date = raw_input(\"Enter due date as 'year-mm-dd' (optional). > \")\n tags = raw_input(\n \"Enter tags for the task (comma separated) (optional). > \")\n tag_list = [tag.strip() for tag in tags.split(',')]\n try:\n new_task = doto.Task(self.user, description, due_date, tag_list)\n except (NameError, ValueError) as e:\n # On error, print and return.\n print \"Task not created. Error: \", e\n raw_input(\"Press Enter to continue.\")\n return\n self.current_collection.add(new_task)\n return", "def create_flight_needs_task(self):\n duration = self.trip.arrival_date_time - self.trip.departure_date_time\n if duration > timedelta(hours=2):\n self.tasks.append(self.trip.tasks.create(\n title=\"Flight Must Have !\",\n comments=\"It's a long flight ! 
Don't forget your earplugs and your sleep mask.\",\n category=TaskCategory.objects.get(name=\"Others\"),\n deadline=self.trip.departure_date_time - timedelta(days=1)\n ))\n else:\n self.tasks.append(self.trip.tasks.create(\n title=\"Flight Must Have !\",\n comments=\"Take some food and some drinks for your flight\",\n category=TaskCategory.objects.get(name=\"Others\"),\n deadline=self.trip.departure_date_time - timedelta(days=1)\n ))", "def reserve(self):\n assert self.is_available() is True, \"this slot is not available\"", "def reservation_details():\n session_id = request.args.get('session-id', None)\n user_id = request.args.get('user-id', None)\n reservation_id = request.args.get('reservation-id', None)\n reservation = get_reservation_identified_by_id(reservation_id)\n car = get_car_identified_by_id(reservation.id_car)\n date_from = str(reservation.date_from)\n date_to = str(reservation.date_to)\n total_price = get_total_price(reservation_id)\n if check_authentication(session_id, user_id) and is_reservation_of_the_user(reservation_id, user_id):\n return render_template('car_reservation_details.html', user=user_id, session_id=session_id, car=car,\n reservation_id=reservation_id, date_from=date_from,\n date_to=date_to, total_price=total_price)\n else:\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), authjs=False,\n preview_length=get_cars_preview().__len__(), del_session_cookie=True)", "def post(self):\r\n\t\tdataResponse = defaultResponse\r\n\t\tq = \"\"\"\tUPDATE scooters\r\n\t\t\t\tSET is_reserved = false\r\n\t\t\t\tWHERE id={id.int}\r\n\t\t\t\tAND is_reserved = true\r\n\t\t\t\tRETURNING true;\r\n\t\t\t\t\"\"\"\r\n\t\tpayload, newQuery = Validator.validatePayload(q, request)\r\n\t\tif payload:\r\n\t\t\t# End reservation success code\r\n\t\t\tdataResponse = getQueryResponse(payload, newQuery, queryType='update')\r\n\r\n\t\tif dataResponse[0]:\r\n\t\t\tq = \"\"\"SELECT ST_Distance(location, ST_MakePoint({endlng.float}, {endlat.float})) FROM scooters WHERE id = {id.int}\"\"\"\r\n\t\t\tpayload, newQuery = Validator.validatePayload(q, request)\r\n\t\t\tif payload:\r\n\t\t\t\t# Charge customer and update scooter location.\r\n\t\t\t\tdistanceTraveled = getQueryResponse(payload, newQuery, queryType='query')[0]\r\n\t\t\t\tdistanceTraveled = distanceTraveled[0]\r\n\t\t\t\twhile type(distanceTraveled) is tuple:\r\n\t\t\t\t\tdistanceTraveled = distanceTraveled[0]\r\n\t\t\t\tdistanceTraveled = roundup(distanceTraveled) if distanceTraveled > 0 else 1 # Min distance traveled is always 1.\r\n\t\t\t\tpricePerMeter = 1.0 # Ideally, this value is should not be hard coded\r\n\t\t\t\tfareCost = pricePerMeter * distanceTraveled\r\n\r\n\t\t\t\tq = \"\"\"UPDATE users\r\n\t\t\t\t\tSET (last_fare, fares, scooter_ids, distances_traveled) = ({fareCost}::real, array_append(fares, {fareCost}::real),\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tarray_append(scooter_ids, {id.int}::bigint),\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tarray_append(distances_traveled, {distanceTraveled}::bigint))\r\n\t\t\t\t\tWHERE id={userid.int};\r\n\r\n\t\t\t\t\tUPDATE scooters\r\n\t\t\t\t\tSET (lon, lat, distances_traveled, rider_ids, location) = ({endlng.float}, {endlat.float},\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tarray_append(distances_traveled, {distanceTraveled}::bigint),\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tarray_append(rider_ids, {userid.int}::bigint),\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tST_POINT({endlng.float}, {endlat.float}))\r\n\t\t\t\t\tWHERE id = 
{id.int};\r\n\r\n\t\t\t\t\t\"\"\"\r\n\t\t\t\tq = q.replace('{fareCost}', str(fareCost)).replace('{distanceTraveled}', str(distanceTraveled)) # Partial format subtitution\r\n\r\n\t\t\t\tpayload, newQuery = Validator.validatePayload(q, request)\r\n\t\t\t\tif payload:\r\n\t\t\t\t\t_ = getQueryResponse(payload, newQuery, queryType='update')\r\n\r\n\t\treturn dataResponse", "def add_room(self, data):\n room_id = data['room_id']\n x, y = literal_eval(data['coordinates'])\n room_data = {'id': data['room_id'],\n 'title': data['title'],\n 'description' : data['description'],\n 'coordinates': literal_eval(data['coordinates']),\n 'elevation': data['elevation'],\n 'terrain': data['terrain'],\n 'exits' : {direction: '?' for direction in data['exits']}\n }\n self.rooms.setdefault(room_id, room_data)", "def __ui_add_new_person(self):\n person_id = int(input(\"ID: \"))\n person_name = input(\"Name: \").strip()\n person_phone_number = input(\"Phone number: \").strip()\n self.__person_service.service_add_person(person_id, person_name, person_phone_number)\n print(\"Person successfully added to your agenda!\\n\")", "def test_save_slot_same_start_end(self):\n business = BUSINESS_FACTORY.create_business()\n with self.assertRaises(ValidationError) as context_manager:\n Slot.objects.create(site_id=2, business_id=business.id,\n start_date=datetime.date.today(),\n end_date=datetime.date.today())\n self.fail('Invalid slot saved.')\n LOG.debug(context_manager.exception)", "async def get_reservation_endpoint(request):\n reservation_id = request.args[\"reservation_id\"][0]\n reservation_dict = model.get_reservation(reservation_id)\n return json(reservation_dict)", "def add_bus():\n\n return render_template('bus-add-form.html')", "def insertRoom(cursor, room: Room) -> None:\n\n # TODO: Calculate the values. room object may not contain the creator\n # or maybe remove that feature\n cursor.execute(\n \"INSERT INTO rooms VALUES (?, ?, ?)\",\n (roomID, roomName, creator)\n )", "def newEquipment(recipe):\r\n db = db_helpers.getDbCon()\r\n cursor = db.cursor()\r\n equipmentInsertQuery = \"\"\"INSERT into equipment (equipment_id, equipment_name) \r\n VALUES (%s, %s) ON Duplicate KEY UPDATE equipment_id = equipment_id;\"\"\"\r\n try:\r\n for instr in recipe.instructions:\r\n for equip in instr.equipment:\r\n cursor.execute(equipmentInsertQuery, (equip.equipment_id, equip.equipment_name))\r\n db.commit()\r\n except Exception:\r\n print(\"Error: OOPs something went wrong while adding new equipment to the database\")\r\n finally:\r\n cursor.close()\r\n db.close()", "def add_transaction(self):\r\n transactionvariable = self.transactionvariable.get()\r\n transactionvariable = (ast.literal_eval(transactionvariable)[0]) # converts to tuple\r\n pattern = re.compile('\\d+(\\.\\d+)?')\r\n match = re.search(pattern, self.difference_box.get())\r\n if self.difference_box.get() == \"\":\r\n pass\r\n else:\r\n if match: \r\n self.cursor.execute(\"\"\"UPDATE transactions SET Difference = ? WHERE TransactionID = ?\"\"\",\r\n (self.difference_box.get(), transactionvariable,))\r\n else:\r\n messagebox.showinfo(\"Error\", \"Transaction incorrect format (+/-DD)\")\r\n\r\n if self.dateandtime_box.get() == \"\":\r\n pass\r\n else:\r\n try:\r\n datetime.strptime(self.dateandtime_box.get(), '%Y-%m-%d %H:%M:%S') \r\n self.cursor.execute(\"\"\"UPDATE transactions SET DateAndTime = ? 
WHERE TransactionID = ?\"\"\",\r\n (self.dateandtime_box.get(), transactionvariable,))\r\n except ValueError:\r\n messagebox.showinfo(\"Error\", \"Date and time incorrect format (YYYY-MM-DD HH:MM:SS)\")\r\n\r\n self.db.commit()\r\n self.edit_transaction_window.destroy()\r\n FinancesFrame.update_table(self)", "async def create_deposit_address(self, code: str, params={}):\n await self.load_markets()\n request = {\n 'op_renew': 1,\n }\n return await self.fetch_deposit_address(code, self.extend(request, params))", "def add(table):\n\n list_labels = [\"Name: \", \"Manufacturer: \", \"purchase_date: \", \"Durability: \"]\n data_input = ui.get_inputs(list_labels, \"Add new record\")\n\n id_ = common.generate_random(table)\n is_date_number = data_input[2].isdigit() and len(data_input) == 4\n is_durability_number = data_input[3].isdigit()\n\n if is_date_number is True and is_durability_number is True:\n data_input.insert(0, id_)\n table.append(data_input)\n\n elif is_date_number is False:\n ui.print_error_message(\"Wrong year format! Record add failed!\")\n\n elif is_durability_number is False:\n ui.print_error_message(\"Wrong durability format! Record add failed!\")\n\n return table" ]
[ "0.75879747", "0.7112902", "0.6949954", "0.6692203", "0.650362", "0.63385224", "0.6337414", "0.62899226", "0.62474537", "0.61857814", "0.6139244", "0.61172265", "0.60497403", "0.60310566", "0.6028615", "0.6002983", "0.596607", "0.58925354", "0.5867518", "0.5861634", "0.58234113", "0.58197767", "0.58127135", "0.5779862", "0.5776109", "0.576195", "0.57400244", "0.5672108", "0.56688523", "0.5621449", "0.56181633", "0.5615572", "0.5604731", "0.55813235", "0.55622816", "0.554542", "0.55402225", "0.55184805", "0.54686767", "0.54477584", "0.5423418", "0.5417296", "0.5409003", "0.5407886", "0.53972465", "0.536953", "0.5368703", "0.5350277", "0.5337995", "0.53321993", "0.53093284", "0.53085744", "0.5295863", "0.52952003", "0.5292345", "0.5281538", "0.52769256", "0.52575815", "0.5250901", "0.5235191", "0.5233815", "0.5224631", "0.521705", "0.521281", "0.5192634", "0.5188829", "0.5180434", "0.5170779", "0.5169888", "0.5166626", "0.5154033", "0.51453793", "0.5139463", "0.51354", "0.513379", "0.51262337", "0.5125381", "0.51167077", "0.5110751", "0.5110105", "0.5103854", "0.5093096", "0.50697124", "0.50661707", "0.5065463", "0.50638705", "0.50636554", "0.5060009", "0.5058642", "0.5049605", "0.50460064", "0.5044343", "0.50433046", "0.50330937", "0.5031955", "0.50236994", "0.50165325", "0.5009531", "0.5000557", "0.4999858" ]
0.75634503
1
Get an existing reservation
async def get_reservation_endpoint(request): reservation_id = request.args["reservation_id"][0] reservation_dict = model.get_reservation(reservation_id) return json(reservation_dict)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reservation(self):\n return self.request.get('reservation', None)", "def reservation(self):\n return self._reservation", "def reservation_read(token_user, res_id):\n res = Reservation.query.get(res_id)\n if res is None:\n abort(404, 'reservation not found')\n\n return json.dumps(res.as_dict(for_user=token_user))", "def select_reservation(self, ctx: dataclasses.dataclass) -> ResultE[dataclasses.dataclass]:\n pk = cf.get_int_or_none(ctx.pk) or 0\n if pk <= 0:\n return self._error('Missed Reservation ID', ctx, ReservationErrors.missed_reservation)\n try:\n data = self._reservations_repo.get(pk)\n except Exception as err:\n return self._error(\n f\"Error select Reservation ID={pk} in House ID={ctx.house.id}\", ctx, ReservationErrors.error, exc=err\n )\n if data == Nothing:\n return self._error(\n f\"Unknown Reservation ID={pk} in House ID={ctx.house.id}\", ctx, ReservationErrors.missed_reservation\n )\n if hasattr(ctx, 'source'):\n ctx.source = data.unwrap()\n else:\n ctx.reservation = data.unwrap()\n return Success(ctx)", "def show_reservation(self, reservation_id):\n\n # create an instance of the model\n reserv_model = Reservation(self.settings)\n\n # query the model\n r = reserv_model.find_reservation(reservation_id)\n\n # return the result in a json-ifiable form\n json_reservation = r.to_json()\n\n # return\n print json_reservation\n return json_reservation", "def get_res_by_id(res_id):\n # look up ID, if non-exist return error message\n res = session.query(Reservation).filter_by(id=res_id).first()\n if not res:\n return jsonify({'error': 'no reservation with id {} found'.format(res_id)}), 400\n return jsonify({'reservation': res.serialize()})", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Reservation':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = ReservationArgs.__new__(ReservationArgs)\n\n __props__.__dict__[\"concurrency\"] = None\n __props__.__dict__[\"creation_time\"] = None\n __props__.__dict__[\"ignore_idle_slots\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"multi_region_auxiliary\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"reservation_id\"] = None\n __props__.__dict__[\"slot_capacity\"] = None\n __props__.__dict__[\"update_time\"] = None\n return Reservation(resource_name, opts=opts, __props__=__props__)", "def get_pass_reservations(pass_id):\n cursor.execute(\"\"\"select * from reservations where paying_passenger_id= %s\"\"\", [pass_id]) #query\n reservations = cursor.fetchall() #fetch all reservations related to that passenger\n\n return reservations", "def reservation_add(token_user):\n if not json_param_exists('team_id') or \\\n not json_param_exists('room_id') or \\\n not json_param_exists('start') or \\\n not json_param_exists('end'):\n abort(400, 'one or more required parameter is missing')\n\n team_id = request.json['team_id']\n team = Team.query.get(team_id)\n if team is None:\n abort(400, 'invalid team id')\n\n if not (token_user.has_permission('reservation.create') and team.has_member(token_user)):\n abort(403)\n\n room_id = request.json['room_id']\n room = Room.query.get(room_id)\n if room is None:\n abort(400, 'invalid room id')\n\n start = parse_datetime(request.json['start'])\n end = parse_datetime(request.json['end'])\n if start is None or end is None:\n abort(400, 'cannot parse start or end date')\n\n if start >= end:\n abort(400, \"start time must be 
before end time\")\n\n res = Reservation(team=team, room=room, created_by=token_user,\n start=start, end=end)\n\n attempt_override = False\n if json_param_exists(\"override\") and isinstance(request.json[\"override\"], bool):\n attempt_override = request.json[\"override\"]\n\n conflict_status, conflicting_reservations = res.validate_conflicts()\n if conflict_status == Reservation.NO_CONFLICT:\n pass\n elif conflict_status == Reservation.CONFLICT_OVERRIDABLE:\n if attempt_override:\n # Delete conflicting reservations\n for conflict in conflicting_reservations:\n get_db().delete(conflict)\n else:\n return json.dumps({\"overridable\": True}), 409\n elif conflict_status == Reservation.CONFLICT_FAILURE:\n return json.dumps({\"overridable\": False}), 409\n\n get_db().add(res)\n get_db().commit()\n\n return '', 201", "def reservation_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"reservation_id\")", "def reservation_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"reservation_id\")", "def get_wharton_gsr_reservations():\n\n sessionid = get_wharton_sessionid()\n\n if not sessionid:\n return jsonify({'error': 'No Session ID provided.'})\n\n try:\n reservations = wharton.get_reservations(sessionid)\n save_wharton_sessionid()\n return jsonify({'reservations': reservations})\n except APIError as e:\n return jsonify({\"error\": str(e)}), 400", "def create_new_reservation():\n if not request.json:\n return jsonify({'error': 'no body supplied'}), 400\n\n # look up by date to see if any availability\n res_date = request.json.get('date', None)\n if not res_date:\n error = 'no reservation date supplied'\n flash(error, 'error')\n return jsonify({'error': error}), 400\n\n # check if res time present, if found, convert to DT object\n res_time = request.json.get('time', None)\n if not res_time:\n error = 'no reservation time supplied'\n flash(error, 'error')\n return jsonify({'error': error}), 400\n res_time = time_str_to_obj(res_time)\n\n open_inventory = session.query(Inventory).filter_by(date=res_date).all()\n if not open_inventory:\n error = 'no open inventory for date {}'.format(res_date)\n flash(error, 'error')\n return jsonify({'error': error})\n\n error = 'reservation invalid'\n for inv in open_inventory:\n for window in inv.windows:\n if window.current_res_count < window.max_res_count:\n # check if res date falls in current window\n window_start = time_str_to_obj(window.start_time)\n window_end = time_str_to_obj(window.end_time)\n\n # if requested res time is valid, update res count and save res\n if window_start <= res_time < window_end:\n window.current_res_count = window.current_res_count + 1\n session.add(window)\n\n res = Reservation(**request.json)\n session.add(res)\n resp = session.commit()\n if not resp:\n # send message to flask for creation by name\n flash('reservation for {} created'.format(request.json.get('name')), 'success')\n return jsonify({'message': 'reservation for {} created'.format(request.json.get('name'))})\n else:\n error = 'requested reservation time is not available in current inventory'\n else:\n error = 'current inventory window cannot accept additional reservations, please select different time'\n flash(error, 'error')\n return jsonify({'error': error}), 400", "def reservation_details():\n session_id = request.args.get('session-id', None)\n user_id = request.args.get('user-id', None)\n reservation_id = request.args.get('reservation-id', None)\n reservation = get_reservation_identified_by_id(reservation_id)\n car = 
get_car_identified_by_id(reservation.id_car)\n date_from = str(reservation.date_from)\n date_to = str(reservation.date_to)\n total_price = get_total_price(reservation_id)\n if check_authentication(session_id, user_id) and is_reservation_of_the_user(reservation_id, user_id):\n return render_template('car_reservation_details.html', user=user_id, session_id=session_id, car=car,\n reservation_id=reservation_id, date_from=date_from,\n date_to=date_to, total_price=total_price)\n else:\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), authjs=False,\n preview_length=get_cars_preview().__len__(), del_session_cookie=True)", "def get_reservations():\n start_date = request.args.get('start')\n end_date = request.args.get('end')\n\n if start_date is not None and end_date is not None:\n start = parse_datetime(request.json['start'])\n end = parse_datetime(request.json['end'])\n if start is None or end is None:\n abort(400, 'cannot parse start or end date')\n\n reservations = Reservation.query.filter(\n Reservation.end >= start, Reservation.start <= end)\n else:\n reservations = Reservation.query.filter(\n or_(Reservation.start >= datetime.datetime.now(),\n Reservation.end >= datetime.datetime.now()))\n\n reservations = map(lambda x: x.as_dict(), reservations)\n\n return json.dumps(reservations)", "def get_reservations_endpoint():\n\n email = request.args.get('email')\n sessionid = request.args.get('sessionid')\n if not email and not sessionid:\n return jsonify({\"error\": \"A session id or email must be sent to server.\"}), 400\n\n libcal_search_span = request.args.get(\"libcal_search_span\")\n if libcal_search_span:\n try:\n libcal_search_span = int(libcal_search_span)\n except ValueError:\n return jsonify({\"error\": \"Search span must be an integer.\"}), 400\n else:\n libcal_search_span = 3\n\n try:\n reservations = get_reservations(email, sessionid, libcal_search_span)\n return jsonify({'reservations': reservations})\n except APIError as e:\n return jsonify({\"error\": str(e)}), 400", "async def add_reservation_endpoint(request):\n hotel_id = request.args[\"hotel_id\"][0]\n room_type = request.args[\"room_type\"][0]\n arrival_date = request.args[\"arrival_date\"][0]\n departure_date = request.args[\"departure_date\"][0]\n status = request.args[\"status\"][0]\n reservation_id = model.add_reservation(hotel_id, room_type, arrival_date, departure_date, status)\n if reservation_id == model.OPERATION_ERROR_RETURN_CODE:\n return json({\"success\": False})\n return json({\"success\": True, \"reservation_id\": reservation_id})", "def _quota_reservations_query(context, reservations):\n return model_query(\n context, models.Reservation,\n read_deleted=\"no\",\n ).filter(\n models.Reservation.uuid.in_(reservations),\n ).with_for_update()", "def reservation_detail(request, reservation_number):\n if not request.user.is_superuser:\n messages.error(request, \"Sorry, you don't have access to this \\\n part of the site.\")\n return redirect(reverse('home'))\n\n amenities = Amenity.objects.all()\n reservation = get_object_or_404(Reservation,\n reservation_number=reservation_number)\n\n messages.info(request, f'This is the confirmation email sent to the guests \\\n after booking for reservation number {reservation_number}.')\n\n template = 'checkout/checkout_success.html'\n context = {\n 'reservation': reservation,\n 'admin': True,\n 'amenities': amenities,\n }\n return render(request, template, context)", "def getReservationDict():\n table = 'reservations'\n connection = 
openConnection()\n curs = connection.cursor()\n sqlcmd = \"SELECT * FROM \" + str(table)\n d = {}\n \n curs.execute(sqlcmd)\n for row in curs.fetchall():\n flightData = airlineClasses.Flight(row[2],\n row[3],row[4],row[5],row[6],row[7],row[8],row[9],row[10])\n reservation = airlineClasses.Reservation(row[0],row[1],flightData)\n d[reservation.reservationId] = reservation\n\n curs.close()\n connection.close()\n \n return d", "def reservation_update(token_user, res_id):\n if not json_param_exists('room_id') or \\\n not json_param_exists('start') or \\\n not json_param_exists('end'):\n abort(400, 'one or more required parameter is missing')\n\n room_id = request.json['room_id']\n room = Room.query.get(room_id)\n if room is None:\n abort(400, 'invalid room id')\n\n start = parse_datetime(request.json['start'])\n end = parse_datetime(request.json['end'])\n if start is None or end is None:\n abort(400, 'cannot parse start or end date')\n\n res = Reservation.query.get(res_id)\n if res is None:\n abort(400, 'invalid reservation id')\n\n if not token_user.has_permission('reservation.update.elevated'):\n is_my_reservation = any(map(lambda m: m.id == token_user.id,\n res.team.members))\n if not (is_my_reservation and\n token_user.has_permission('reservation.update')):\n abort(403, 'insufficient permissions to update reservation')\n\n res.room = room\n res.start = start\n res.end = end\n\n attempt_override = False\n if json_param_exists(\"override\") and isinstance(request.json[\"override\"], bool):\n attempt_override = request.json[\"override\"]\n\n conflict_status, conflicting_reservations = res.validate_conflicts()\n if conflict_status == Reservation.NO_CONFLICT:\n pass\n elif conflict_status == Reservation.CONFLICT_OVERRIDABLE:\n if attempt_override:\n # Delete conflicting reservations\n for conflict in conflicting_reservations:\n get_db().delete(conflict)\n else:\n return json.dumps({\"overridable\": True}), 409\n elif conflict_status == Reservation.CONFLICT_FAILURE:\n return json.dumps({\"overridable\": False}), 409\n\n get_db().commit()\n\n return '', 204", "def _quota_reservations(session, context, reservations):\n\n # Get the listed reservations\n return model_query(context, models.Reservation,\n read_deleted=\"no\",\n session=session).\\\n filter(models.Reservation.uuid.in_(reservations)).\\\n with_lockmode('update').\\\n all()", "def show_reservations(self, user_id = None):\n\n # create an instance of the model\n reserv_model = Reservation(self.settings)\n\n # query the model\n results = reserv_model.find_reservations(user_id)\n\n # return the result in a json-ifiable form\n json_results = []\n for reservation in results:\n json_results.append(reservation.to_json())\n\n # return\n print json_results\n return json_results", "def reservations(self):\n session_id = plone_session.get_session_id(self.context)\n return db.reservations_by_session(session_id).all()", "def reservation_data(self):\n reservations = []\n\n for reservation in self.reservations():\n resource = utils.get_resource_by_uuid(reservation.resource)\n\n if resource is None:\n log.warn('Invalid UUID %s' % str(reservation.resource))\n continue\n\n resource = resource.getObject()\n\n data = {}\n\n data['title'] = utils.get_resource_title(resource)\n\n timespans = []\n for start, end in reservation.timespans():\n timespans.append(u'◆ ' + utils.display_date(start, end))\n\n data['time'] = '<br />'.join(timespans)\n data['quota'] = utils.get_reservation_quota_statement(\n reservation.quota\n ) if reservation.quota > 1 else u''\n\n 
data['url'] = resource.absolute_url()\n data['remove-url'] = ''.join((\n resource.absolute_url(),\n '/your-reservations?remove=',\n reservation.token.hex\n ))\n reservations.append(data)\n\n return reservations", "def reservation(self, reservation):\n\n self._reservation = reservation", "def reservs(request):\n a = request.GET\n print(a)\n if request.method == 'POST':\n # create a form\n form = NewReservationsOfficesForm(data=request.POST)\n if form.is_valid():\n form.save()\n return redirect('coworkings:index')\n else:\n form = NewReservationsOfficesForm()\n\n context = {\"form\": form}\n return render(request, 'coworkings/reservs.html', context)", "def create_reservation(self, gs_id, vehicle_id, user_id):\n\n # create the reservation\n reservation = Reservation(self.settings, gs_id, vehicle_id, user_id)\n status, model = reservation.create()\n\n # return status\n if status:\n json_res = model.to_json()\n return True, json_res\n else:\n return False, None", "def check_reservation(self, gs_id, vehicle_id, user_id, res_type):\n\n # perform the query\n status = reservation_exists(self.settings, user_id, vehicle_id, gs_id, res_type) \n\n # return status\n if status:\n return True\n else:\n return False", "def reserve_seat(self):\n try:\n # start a new transaction\n self.cnx.start_transaction()\n cur = self.cnx.cursor()\n\n # iterate through the rows of the result until\n # we find a seat that is open\n cur.execute(\"select seat, status from Flights\")\n found = None\n for row in cur.fetchall():\n if row[1] == 0:\n found = row[0]\n break\n\n # if we found an available seat\n if found is not None:\n # wait for user to confirm they want the seat\n print \"seat \", found, \" is open. <Enter> to continue.\"\n sys.stdin.readline()\n\n # update that the seat is taken\n cur.execute(\"update Flights set status = 1 where seat = %s\", (found,))\n self.cnx.commit()\n return found\n else:\n # if failed to reserve that seat then rollback and return None to indicate failure\n self.cnx.rollback()\n return None\n except mysql.connector.InternalError as e:\n print \"failed to reserve: \", e\n try:\n self.cnx.rollback()\n except mysql.connector.InternalError as e:\n # silence\n pass\n return None", "def get_object(self):\n reserva = super().get_object()\n print('asdadasdasdasd', reserva)\n if not reserva.cancelable:\n raise Http404\n return reserva", "def retrieve_clothes_reservations(payload, clothes_id):\n selection = Reserve.query.filter_by(clothes_id=clothes_id).all()\n # if the given clothes has not been reserved, abort 404\n if len(selection) == 0:\n abort(404)\n # if two or more user reserved the same clothe, abort umprocessable\n if len(selection) >= 2:\n abort(422)\n reservation = selection[0]\n\n # querying who is accessing and check role\n access_user = User.query.filter_by(auth0_id=payload['sub']).first()\n role = access_user.role\n # if user role is \"user\", check if access user_id matches\n # reservation user_id\n reserved_user = reservation.user\n if role == 'user' and access_user.id != reserved_user.id:\n raise AuthError({\n 'code': 'Invalid_claims',\n 'description': 'Unauthorized access by user'\n }, 401)\n\n # query clothes\n clothes = reservation.clothes\n\n return jsonify({\n 'success': True,\n 'clothes': clothes.format(),\n 'user': reserved_user.format()\n })", "def place(request, place_id):\n office_reserv_time = NumberOffice.objects.get(id=place_id)\n reservs = office_reserv_time.reservation_set.order_by('id')\n\n context = {\"office_reserv_time\": office_reserv_time, 'reservs': 
reservs}\n return render(request, 'coworkings/place_reserv.html', context)", "def confirm_car_reservation():\n session_id = request.args.get('session-id', None)\n user_id = request.args.get('user-id', None)\n today = datetime.date.today()\n if request.method == 'POST':\n car_id = request.form['hidden-car-id']\n car = get_car_identified_by_id(car_id)\n date_from = request.form['hidden-date-from']\n date_to = request.form['hidden-date-to']\n if not are_dates_valid(date_from, date_to):\n if check_authentication(session_id, user_id):\n return render_template('car_details.html', car=car, error=\"Please insert a valid date interval!\",\n user=user_id, session_id=session_id, today=today)\n else:\n return render_template('car_details.html', car=car, error=\"Please insert a valid date interval!\", today=today)\n if is_car_available_in_the_selected_period(date_from, date_to, car_id):\n if check_authentication(session_id, user_id):\n if has_user_age_requirement(user_id, car_id):\n reservation_id = save_car_reservation(car_id, user_id, date_from, date_to)\n return render_template('car_reservation_details.html', user=user_id, session_id=session_id,\n reservation_id=reservation_id, car=car, date_from=date_from, date_to=date_to,\n total_price=calc_total_price(car.price, date_from, date_to),\n reservation_just_completed=True)\n else:\n error_msg = \"The reservation has failed because you are not at least \" + str(car.min_age) +\\\n \" years old!\"\n return render_template('car_details.html', user=user_id, session_id=session_id,\n error=error_msg, car=car, today=today)\n else:\n return render_template('car_details.html', car=car,\n error=\"You need to be authenticated in order to complete this action!\", today=today)\n else:\n if check_authentication(session_id, user_id):\n return render_template('car_details.html', car=car, is_available=False, show_confirm_div=True,\n date_from=date_from, date_to=date_to, user=user_id, session_id=session_id, today=today)\n else:\n return render_template('car_details.html', car=car, is_available=False, show_confirm_div=True,\n date_from=date_from, date_to=date_to, today=today)\n else:\n if check_authentication(session_id, user_id):\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), user=user_id,\n session_id=session_id, authjs=False, preview_length=get_cars_preview().__len__())\n else:\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), authjs=False,\n preview_length=get_cars_preview().__len__(), del_session_cookie=True)", "def get_reservation_name(self):\n return self.reservation_holder.get_name()", "def can_reserve(train_id,segment_id):\n cursor.execute(\"\"\"select freeseat from seats_free where train_id= %s and segment_id= %s\"\"\", [train_id,segment_id]) # query\n available_seats = cursor.fetchone() # fetch all reservations related to that passenger\n print(available_seats)\n if available_seats[0] == 448:\n return False;\n return True;", "def extend_reservation(self, key):\n\t\tif self.client is None:\n\t\t\traise UsageError(\"Not connected!\")\n\t\treturn self.client.extend_reservation(key)", "def create_reservations(payload, user_id):\n error = False\n # get posted data from json request\n body = request.get_json()\n keys = body.keys()\n # if request does not have json body, abort 400\n if body is None:\n abort(400)\n # if json does not have key 'auth0_id', abort 400\n if 'auth0_id' not in keys:\n abort(400)\n # if json does not have key 'reservation', abort 400\n if 
'reservations' not in keys:\n abort(400)\n # if auth0_id in body does not match auth0_id in payload, abort 401\n if body['auth0_id'] != payload['sub']:\n abort(401)\n\n # query who is accessing\n access_user = User.query.filter_by(auth0_id=payload['sub']).first()\n # check if user_id in URL matches the access user id\n if user_id != access_user.id:\n raise AuthError({\n 'code': 'Invalid_claims',\n 'description': 'Unauthorized access by user'\n }, 401)\n\n # query clothes and store them in variable \"clothes\"\n if not isinstance(body['reservations'], list):\n abort(400)\n for value in body['reservations']:\n if not isinstance(value, int):\n abort(400)\n # check if all clothes indeed exist\n clothes = []\n for clothes_id in body['reservations']:\n # query clothes\n selection = Clothes.query.get(clothes_id)\n if selection is None:\n abort(404)\n # if that clothes has been already reserved, abort 422\n if selection.status == \"reserved\":\n abort(422)\n clothes.append(selection)\n\n # query user\n user = User.query.get(user_id)\n formatted_user = user.format()\n\n # make reservations\n try:\n reservations = []\n formatted_clothes = []\n for item in clothes:\n new_reservation = Reserve()\n new_reservation.user = user\n new_reservation.clothes = item\n item.status = \"reserved\"\n reservations.append(new_reservation)\n # commit these reservations\n for reservation in reservations:\n reservation.insert()\n formatted_clothes.append(reservation.clothes.format())\n except Exception:\n # rollback all sessions\n for reservation in reservations:\n reservation.rollback()\n error = True\n print(sys.exc_info())\n finally:\n # close all sessions\n for reservation in reservations:\n reservation.close_session()\n\n if error:\n abort(422)\n else:\n return jsonify({\n 'success': True,\n 'clothes': formatted_clothes,\n 'user': formatted_user\n })", "def get(self, flight_id):\n booking_date = request.args.get('bdate', datetime.now().strftime('%b %d %Y'), type=str)\n current_user = get_jwt_identity()\n try:\n flight = get_flight(flight_id)\n if not flight:\n return generate_response('Selected flight not available', 400)\n\n bookings_data = filter_booking_by_flight(flight_id)\n bookings = [booking.serialize() for booking in bookings_data\n if booking.booking_date.strftime('%b %d %Y') == booking_date]\n response = {'booking_details': bookings,\n 'number_of_booking': len(bookings),\n 'message': \"Data retrived successfully\"}\n return jsonify(response), 200\n except Exception as e:\n db.session.rollback()\n return jsonify({'error': str(e)}), 401", "def retrieve_user_reservations(payload, user_id):\n # check if that user indeed exists\n user = User.query.get(user_id)\n if user is None:\n abort(404)\n # querying who is accessing and check role\n access_user = User.query.filter_by(auth0_id=payload['sub']).first()\n role = access_user.role\n # if user role is \"user\", check if access user_id matches\n if role == 'user' and access_user.id != user_id:\n raise AuthError({\n 'code': 'Invalid_claims',\n 'description': 'Unauthorized access by user'\n }, 401)\n\n # query reserations\n reservations = Reserve.query.filter_by(user_id=user_id).all()\n # query clothes\n clothes = []\n for reservation in reservations:\n clothes.append(reservation.clothes.format())\n\n return jsonify({\n 'success': True,\n 'clothes': clothes,\n 'user': user.format()\n })", "def insert_reservation(house, id, check_in_date, check_in_time, check_out_date, guest_name,\n guest_cell, guest_telegram, num_guest, comment, confirm):\n sql = \"\"\"INSERT INTO 
%s VALUES(%s, '%s', '%s', '%s', '%s', '%s', '%s', %s, '%s', %s) RETURNING reservation_id;\"\"\"\n conn = None\n reservation_id = None\n try:\n # read database configuration\n params = config()\n # connect to the PostgreSQL database\n conn = psycopg2.connect(**params)\n # create a new cursor\n cur = conn.cursor()\n # execute the INSERT statement\n print(sql % (house, id, check_in_date, check_in_time, check_out_date, guest_name,\n guest_cell, guest_telegram, num_guest, comment, confirm))\n cur.execute(sql, (house, id, check_in_date, check_in_time, check_out_date, guest_name,\n guest_cell, guest_telegram, num_guest, comment, confirm))\n # get the generated id back\n vendor_id = cur.fetchone()[0]\n # commit the changes to the database\n conn.commit()\n # close communication with the database\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n\n return reservation_id", "def view_reservations(request):\n if not request.user.is_superuser:\n messages.error(request, \"Sorry, you don't have access to this \\\n part of the site.\")\n return redirect(reverse('home'))\n\n reservations = Reservation.objects.all()\n\n past_reservations = []\n upcoming_reservations = []\n arrivals_today = []\n arrivals_next = []\n departures = []\n inhouse_guests = []\n for reservation in reservations:\n for item in reservation.lineitems.all():\n if item.check_in and item.check_out < datetime.today().date():\n if reservation not in past_reservations:\n past_reservations.append(reservation)\n elif item.check_in > datetime.today().date():\n if reservation not in upcoming_reservations:\n upcoming_reservations.append(reservation)\n\n for reservation in reservations:\n for item in reservation.lineitems.all():\n if item.check_in == datetime.today().date():\n if reservation not in arrivals_today:\n arrivals_today.append(reservation)\n elif item.check_out == datetime.today().date():\n if reservation not in departures:\n departures.append(reservation)\n elif item.check_in < datetime.today().date() and item.check_out > datetime.today().date():\n if reservation not in inhouse_guests:\n inhouse_guests.append(reservation)\n elif item.check_in > datetime.today().date() and item.check_in < datetime.today().date() + timedelta(days=7):\n if reservation not in arrivals_next:\n arrivals_next.append(reservation)\n\n template = 'profiles/reservations.html'\n context = {\n \"reservations\": reservations,\n \"past_reservations\": past_reservations,\n \"upcoming_reservations\": upcoming_reservations,\n \"arrivals_today\": arrivals_today,\n \"arrivals_next\": arrivals_next,\n \"departures\": departures,\n \"inhouse_guests\": inhouse_guests,\n }\n\n return render(request, template, context)", "def test_reservation_id_one_instance(self):\n (refs, resv_id) = self.compute_api.create(self.context,\n self.default_flavor,\n image_href=uuids.image_href_id)\n self.assertEqual(len(refs), 1)\n self.assertEqual(refs[0]['reservation_id'], resv_id)", "def update_reservation(self, reservation_id, gs_id, vehicle_id, user_id):\n\n # find the reservation\n reserv_model = Reservation(self.settings)\n r = reserv_model.find_reservation(reservation_id)\n\n # update the reservation\n status = r.update(gs_id, vehicle_id, user_id)\n\n # find the updated reservation\n r = reserv_model.find_reservation(reservation_id)\n \n # return the model\n json_model = r.to_json()\n return status, json_model", "def reservations_by_recurring_allocation(self):\n\n allocation_id = 
self.recurring_allocation_id\n allocation = Session.query(Allocation).get(allocation_id)\n if not allocation:\n return None\n\n reservation_tokens = [each.reservation_token for each\n in allocation.reserved_slots]\n return Session.query(Reservation).filter(\n Reservation.token.in_(reservation_tokens)\n )", "def schedule_reservation(reservation_date,reservation_time,party_size,restaurant_name,first_name,restaurant_address):\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('calendar', 'v3', credentials=creds)\n\n # Call the Calendar API\n now = datetime.datetime.utcnow()\n\n reservation_day=reservation_date.split('/')[0]\n reservation_month =reservation_date.split('/')[1]\n reservation_year =reservation_date.split('/')[2]\n reservation_date = reservation_year+'-'+reservation_month+'-'+reservation_day\n start_time_hr= reservation_time[:2]\n end_time_hr= int(reservation_time[:2])+4\n start_time_min= reservation_time[2:]\n end_time_min=start_time_min\n \n \n event = {\n 'summary': 'Reservation at '+restaurant_name,\n 'location': restaurant_address,\n 'description': 'Reservation for '+party_size+' under '+first_name+' made on '+str(now),\n 'start': {\n 'dateTime': reservation_date+'T'+start_time_hr+':'+start_time_min+':00+08:00',\n 'timeZone': 'Asia/Singapore',\n },\n 'end': {\n 'dateTime': reservation_date+'T'+str(end_time_hr)+':'+end_time_min+':00+08:00',\n 'timeZone': 'Asia/Singapore',\n },\n 'reminders': {\n 'useDefault': False,\n 'overrides': [\n {'method': 'email', 'minutes': 24 * 60},\n {'method': 'popup', 'minutes': 10},\n ],\n },\n }\n\n event = service.events().insert(calendarId='primary', body=event).execute()\n print ('Event created: %s', (event.get('htmlLink')))", "def reservation_delete(token_user, res_id):\n res = Reservation.query.get(res_id)\n if res is None:\n abort(404, 'reservation not found')\n\n if not token_user.has_permission('reservation.delete.elevated'):\n is_my_reservation = any(map(lambda m: m.id == token_user.id,\n res.team.members))\n if not (is_my_reservation and\n token_user.has_permission('reservation.delete')):\n abort(403, 'insufficient permissions to delete reservation')\n\n get_db().delete(res)\n get_db().commit()\n\n return '', 204", "def handle_post(self, request, **kwargs):\n try:\n form = ReservationForm(request.POST)\n if form.is_valid():\n return self.form_valid(form, **kwargs)\n except Exception:\n pass\n return self.get(request, **kwargs)", "def __init__(__self__,\n resource_name: str,\n args: Optional[ReservationArgs] = None,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def get_reservations(restaurant: Restaurant, num_of_reservations: int = 30, page: int = 1):\n reservations = Reservation.query.filter(Reservation.restaurant == restaurant).offset(\n (page - 1) * num_of_reservations).limit(num_of_reservations + 1).all()\n\n more = len(reservations) > 
num_of_reservations\n reservations.pop() if more else None\n\n return reservations, more", "def save_car_reservation(car_id, username, date_from, date_to):\n car = get_car_identified_by_id(car_id)\n price = calc_total_price(car.price, date_from, date_to)\n session = start_session()\n new_car_reservation = CarReservation(car_id, username, date_from, date_to, price)\n session.add(new_car_reservation)\n session.commit()\n queryset = session.query(CarReservation).filter(and_(CarReservation.id_car.__eq__(car_id),\n CarReservation.id_user.__eq__(username),\n CarReservation.date_from.__eq__(date_from),\n CarReservation.date_to.__eq__(date_to),\n CarReservation.price.__eq__(price)))\n reservation = queryset2list(queryset)[0]\n session.close()\n return reservation.id_reservation", "def get_ip_reservation_with_http_info(self, name, **kwargs):\n\n all_params = ['name', 'cookie']\n all_params.append('async')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_ip_reservation\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `get_ip_reservation`\")\n\n\n collection_formats = {}\n\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = []\n\n header_params = {}\n if 'cookie' in params:\n header_params['Cookie'] = params['cookie']\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/oracle-compute-v3+json'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/oracle-compute-v3+json'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api('/ip/reservation/{name}', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='IPReservationResponse',\n auth_settings=auth_settings,\n async=params.get('async'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def create_booking(self, request):\n model = AssignmentModelFromDynamo('assignment', 'params')\n\n model.save()\n\n return model", "def read_existing_reservations():\n reservations = []\n list_of_lines = open(\"reservations.csv\").readlines()\n for each in list_of_lines:\n reservations.append(parse_one_record(each.strip()))\n return reservations", "def test_reservation(site):\n\n # Reserved network\n reserved = models.Network.objects.create(\n site=site, cidr=u'192.168.3.0/24', state='reserved'\n )\n\n # No networks or addresses!\n assert reserved.get_next_network(28) == []\n assert reserved.get_next_address(num=3) == []\n\n # And just to make sure it's working, set the Network to 'allocated':\n reserved.state = models.Network.ALLOCATED\n nets = [u'192.168.3.0/28']\n assert reserved.get_next_network(28, as_objects=False) == nets\n\n addresses = [u'192.168.3.1/32', u'192.168.3.2/32', u'192.168.3.3/32']\n assert 
reserved.get_next_address(num=3, as_objects=False) == addresses", "def get_reservations_of_the_day(restaurant: Restaurant, num_of_reservations: int = 30, page: int = 1):\n today = datetime.date.today()\n \n #la query demmerda\n reservations = Reservation.query.filter(Reservation.restaurant == restaurant).filter(\n datetime.datetime(today.year, today.month, today.day) < Reservation.reservation_time).filter(\n Reservation.reservation_time < datetime.datetime(today.year, today.month, today.day + 1)).offset(\n (page - 1) * num_of_reservations).limit(num_of_reservations + 1).all()\n\n more = len(reservations) > num_of_reservations\n reservations.pop() if more else None\n\n return reservations, more", "def get_ec2_reservations(profile, running_filter):\n try:\n ec2_client = boto3.Session(profile_name=profile).client('ec2')\n except ProfileNotFound:\n print(\"Profile: %s not found\" % profile, file=sys.stderr)\n sys.exit(1)\n filtered_instances = ec2_client.describe_instances(Filters=running_filter)\n return filtered_instances['Reservations']", "async def cancel_reservation_endpoint(request):\n reservation_id = request.args[\"reservation_id\"][0]\n model.cancel_reservation(reservation_id)\n return json({\"success\": True})", "def get_ride_request(reqID):\n req = RideRequest.query.get(reqID)\n return req", "def __get_reservations(self, instance_ids=None):\n if instance_ids:\n self.__validate_instance_id(instance_ids)\n euca_conn = self.__make_connection()\n try:\n return euca_conn.get_all_instances(instance_ids)\n except:\n euca.display_error_and_exit('%s' % ex)\n return False", "def get_booking_at(self, datetime):\n for booking in self.booking_set.all():\n if booking.schedule_start <= datetime < booking.schedule_end and not booking.is_cancelled():\n return booking\n return None", "def get(self, id_cliente):\n cliente = get_cliente_id(id_cliente)\n if not cliente:\n api.abort(404)\n else:\n return cliente", "def get_ip_reservation(self, name, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async'):\n return self.get_ip_reservation_with_http_info(name, **kwargs)\n else:\n (data) = self.get_ip_reservation_with_http_info(name, **kwargs)\n return data", "def get(self, request, format=None):\n mess = Booking.objects.filter(\n accommodation__owner=request.user).order_by('-pk')\n if mess:\n serializer = self.serializer_class(mess, many=True)\n return Response(serializer.data, status=200)\n return Response([], status=200)", "def get_available_time_slot():\n try:\n time_slot_set_list = list()\n # Read all time slot from database\n with open(InterviewCalendarApi.DB_FILE, \"r\") as fd:\n for line in fd:\n time_slot_list = list()\n (_,_,_, time_slots) = line.strip().split(\"|\")\n for time_slot in time_slots.split(\",\"):\n (from_time_slot, to_time_slot) = list(map(int, time_slot.split(\"-\")))\n time_slot_list.extend(range(from_time_slot, (to_time_slot + 1)))\n # Get all available time slot for every user\n time_slot_set_list.append(set(time_slot_list))\n \n # Find common time slot between multiple parties\n available_slots = list(set.intersection(*time_slot_set_list))\n\n msg = json.dumps({\"Status\": \"Success\", \"available_slots\": available_slots})\n return make_response(msg, 200, InterviewCalendarApi.HEADERS)\n except:\n err_msg = sys.exc_info()\n error = json.dumps({'error': 'Unable to find time slot due to error: %s' %str(err_msg)})\n return make_response(error, 401, InterviewCalendarApi.HEADERS)", "def test_get_cve_id_by_time_reserved(reg_user_headers):\n n_ids = 10\n 
time.sleep(1)\n t_before = get_now_timestamp()\n time.sleep(1)\n res_ids = get_reserve_cve_ids(n_ids, utils.CURRENT_YEAR, reg_user_headers['CVE-API-ORG'])\n time.sleep(1)\n t_after = get_now_timestamp()\n\n res_get_ids = requests.get(\n f'{env.AWG_BASE_URL}{CVE_ID_URL}',\n headers=utils.BASE_HEADERS,\n params={\n 'time_reserved.lt': t_after,\n 'time_reserved.gt': t_before\n }\n )\n ok_response_contains(res_get_ids, f'CVE-{utils.CURRENT_YEAR}-')\n assert len(json.loads(res_get_ids.content.decode())['cve_ids']) == n_ids", "def get(self, id=None, o=None):\n\n response = []\n current_user = self.get_current_user()\n\n # [?timestamp_start=<XXX>&timestamp_end=<XXX>]\n ts = self.get_argument('timestamp_start',None)\n te = self.get_argument('timestamp_end',None)\n\n # GET /resources\n if not id and not o and not ts and not te:\n cursor = yield r.table('resources') \\\n .run(self.dbconnection)\n while (yield cursor.fetch_next()):\n item = yield cursor.next()\n response.append(item)\n\n # GET /resources?timestamp_start=<XXX>&timestamp_end=<XXX>\n elif not id and not o:\n try:\n nb_leases = yield r.table(\"leases\").count().run(self.dbconnection)\n if nb_leases > 0:\n # Resources NOT in Leases\n cursor = yield r.table('resources') \\\n .filter({'available':'true'}) \\\n .filter( lambda resource:\n r.table(\"leases\").map(lambda l:\n l['resources'].coerce_to('array')\n ).reduce(lambda left, right:\n left.set_union(right)\n ).contains(resource['id']).not_() \\\n ).run(self.dbconnection)\n while (yield cursor.fetch_next()):\n item = yield cursor.next()\n response.append(item)\n\n if ts and te:\n # List of Resources ids in Leases but not in the given time range\n in_leases = yield r.table(\"leases\").filter(lambda l:\n r.or_(l['start_time'].gt(int(te)),l['end_time'].lt(int(ts)))\n ).map(lambda l:\n l['resources'].coerce_to('array')\n ).reduce(lambda left, right:\n left.set_union(right)\n ).map(lambda x:\n r.table('resources').get(x) \\\n ).run(self.dbconnection)\n logger.debug(in_leases)\n response = response + in_leases\n\n if ts and not te:\n # List of Resources ids in Leases but not in the given time range\n in_leases = yield r.table(\"leases\").filter(lambda l:\n l['end_time'].lt(int(ts))\n ).map(lambda l:\n l['resources'].coerce_to('array')\n ).reduce(lambda left, right:\n left.set_union(right)\n ).map(lambda x:\n r.table('resources').get(x) \\\n ).run(self.dbconnection)\n response = response + in_leases\n\n if not ts and te:\n # List of Resources ids in Leases but not in the given time range\n in_leases = yield r.table(\"leases\").filter(lambda l:\n l['start_time'].gt(int(te))\n ).map(lambda l:\n l['resources'].coerce_to('array')\n ).reduce(lambda left, right:\n left.set_union(right)\n ).map(lambda x:\n r.table('resources').get(x) \\\n ).run(self.dbconnection)\n response = response + in_leases\n else:\n # All available Resources (No Leases in DB)\n cursor = yield r.table('resources') \\\n .filter({'available':'true'}) \\\n .run(self.dbconnection)\n while (yield cursor.fetch_next()):\n item = yield cursor.next()\n response.append(item)\n except Exception as e:\n logger.exception(e)\n\n # GET /resources/<id>\n elif not o and id and self.isUrn(id):\n\n cursor = yield r.table('resources') \\\n .filter({'id': id}) \\\n .run(self.dbconnection)\n while (yield cursor.fetch_next()):\n item = yield cursor.next()\n response.append(item)\n # GET /resources/<id>/leases\n elif id and self.isUrn(id) and o == 'leases':\n cursor = yield r.table(o) \\\n .filter(lambda lease: lease[\"resources\"].contains(id)) \\\n 
.run(self.dbconnection)\n #\n\n while (yield cursor.fetch_next()):\n item = yield cursor.next()\n response.append(item)\n # GET /resources/<id>/slices\n elif id and self.isUrn(id) and o == 'slices':\n cursor = yield r.table(o) \\\n .filter(lambda slice: slice[\"resources\"]==id) \\\n .run(self.dbconnection)\n #\n\n while (yield cursor.fetch_next()):\n item = yield cursor.next()\n response.append(item)\n # GET /resources/<id>/testbeds\n elif id and self.isUrn(id) and o == 'testbeds':\n cursor = yield r.table('resources') .filter({'id': id}) \\\n .pluck('id','testbed','manager') \\\n .merge(lambda res: {\n 'testbeds': r.table('testbeds').get_all(res['testbed'], index='id') \\\n .coerce_to('array')\n }) \\\n .run(self.dbconnection)\n while (yield cursor.fetch_next()):\n item = yield cursor.next()\n response.append(item)\n else:\n self.userError(\"invalid request\")\n\n return\n\n self.finish(json.dumps({\"result\": response}, cls=myJSONEncoder))", "def delete_reservation(request, reservation_number):\n if not request.user.is_superuser:\n messages.error(request, \"Sorry, you don't have access to this \\\n part of the site.\")\n return redirect(reverse('home'))\n \n reservation = get_object_or_404(Reservation,\n reservation_number=reservation_number)\n reservation.delete()\n messages.info(request, f'Reservation with reservation number {reservation_number}\\\n has been successfully deleted.')\n return redirect('view_reservations')", "def detele_car_reservation():\n session_id = request.args.get('session-id', None)\n user_id = request.args.get('user-id', None)\n reservation_id = request.args.get('reservation-id', None)\n today = datetime.date.today()\n if check_authentication(session_id, user_id) and is_reservation_of_the_user(reservation_id, user_id):\n delete_reservation(reservation_id)\n reservations_list = get_user_reservations_list(user_id)\n cars_reservations_list = get_cars_user_reservations_list(reservations_list)\n reservations_status_list = get_reservations_status_list(reservations_list)\n user = get_user_by_id(user_id)\n return render_template('user_area.html', user=user_id, session_id=session_id, edit_mode=False,\n surname=user.surname, name=user.name, birthdate=user.birthdate, today=today,\n reservations_list=reservations_list, cars_reservations_list=cars_reservations_list,\n reservations_status_list=reservations_status_list)\n else:\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), authjs=False,\n preview_length=get_cars_preview().__len__(), del_session_cookie=True)", "def booking_show(id):\n booking = Booking.query.get(id)\n payment = Payment.query.filter_by(booking_id=id).first()\n return render_template(\"bookings_single.html\", booking=booking,\n payment=payment)", "def validate_and_save(self, reservation, form):\n if not reservation.validate():\n context_data = self.get_context_data(reservation=reservation)\n context_data[\"error\"] = self.get_error_message(form, reservation)\n return render(self.request, self.template_name, context_data)\n\n reservation.save()\n return redirect(calendar_url_reservation(reservation))", "def post(self):\r\n\t\tdataResponse = defaultResponse\r\n\t\tq = \"\"\"\tUPDATE scooters\r\n\t\t\t\tSET is_reserved = false\r\n\t\t\t\tWHERE id={id.int}\r\n\t\t\t\tAND is_reserved = true\r\n\t\t\t\tRETURNING true;\r\n\t\t\t\t\"\"\"\r\n\t\tpayload, newQuery = Validator.validatePayload(q, request)\r\n\t\tif payload:\r\n\t\t\t# End reservation success code\r\n\t\t\tdataResponse = getQueryResponse(payload, newQuery, 
queryType='update')\r\n\r\n\t\tif dataResponse[0]:\r\n\t\t\tq = \"\"\"SELECT ST_Distance(location, ST_MakePoint({endlng.float}, {endlat.float})) FROM scooters WHERE id = {id.int}\"\"\"\r\n\t\t\tpayload, newQuery = Validator.validatePayload(q, request)\r\n\t\t\tif payload:\r\n\t\t\t\t# Charge customer and update scooter location.\r\n\t\t\t\tdistanceTraveled = getQueryResponse(payload, newQuery, queryType='query')[0]\r\n\t\t\t\tdistanceTraveled = distanceTraveled[0]\r\n\t\t\t\twhile type(distanceTraveled) is tuple:\r\n\t\t\t\t\tdistanceTraveled = distanceTraveled[0]\r\n\t\t\t\tdistanceTraveled = roundup(distanceTraveled) if distanceTraveled > 0 else 1 # Min distance traveled is always 1.\r\n\t\t\t\tpricePerMeter = 1.0 # Ideally, this value is should not be hard coded\r\n\t\t\t\tfareCost = pricePerMeter * distanceTraveled\r\n\r\n\t\t\t\tq = \"\"\"UPDATE users\r\n\t\t\t\t\tSET (last_fare, fares, scooter_ids, distances_traveled) = ({fareCost}::real, array_append(fares, {fareCost}::real),\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tarray_append(scooter_ids, {id.int}::bigint),\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tarray_append(distances_traveled, {distanceTraveled}::bigint))\r\n\t\t\t\t\tWHERE id={userid.int};\r\n\r\n\t\t\t\t\tUPDATE scooters\r\n\t\t\t\t\tSET (lon, lat, distances_traveled, rider_ids, location) = ({endlng.float}, {endlat.float},\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tarray_append(distances_traveled, {distanceTraveled}::bigint),\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tarray_append(rider_ids, {userid.int}::bigint),\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tST_POINT({endlng.float}, {endlat.float}))\r\n\t\t\t\t\tWHERE id = {id.int};\r\n\r\n\t\t\t\t\t\"\"\"\r\n\t\t\t\tq = q.replace('{fareCost}', str(fareCost)).replace('{distanceTraveled}', str(distanceTraveled)) # Partial format subtitution\r\n\r\n\t\t\t\tpayload, newQuery = Validator.validatePayload(q, request)\r\n\t\t\t\tif payload:\r\n\t\t\t\t\t_ = getQueryResponse(payload, newQuery, queryType='update')\r\n\r\n\t\treturn dataResponse", "def test_one_reserveation(self):\n test_booking = create_test_booking(self.user, self.first_day, 11)\n\n response = self.client.get(\n reverse('bookings', kwargs={'facility': 'g'}))\n\n bookings = response.context[\"bookings\"]\n\n for week in bookings:\n for row in week.rows:\n for block in row.blocks:\n if block.date == test_booking.date:\n self.assertEqual(type(block), BlockReserved)\n else:\n self.assertEqual(type(block), BlockAvailable)", "def retrieve(self, request, pk=None):\n try:\n movie_night = MovieNight.objects.get(pk=pk)\n serializer = MovieNightSerializer(movie_night, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)", "def test_reservation_ids_two_instances(self):\n (refs, resv_id) = self.compute_api.create(self.context,\n self.default_flavor,\n image_href=uuids.image_href_id,\n min_count=2, max_count=2)\n self.assertEqual(len(refs), 2)\n self.assertIsNotNone(resv_id)\n for instance in refs:\n self.assertEqual(instance['reservation_id'], resv_id)", "def restaurant_only():\n work_time = {\n \"Понедельник\": \"8:00-23:00\",\n \"Вторник\": \"8:00-23:00\",\n \"Среда\": \"8:00-23:00\",\n \"Четверг\": \"8:00-23:00\",\n \"Пятница\": \"8:00-23:00\",\n \"Суббота\": \"8:00-23:00\",\n \"Воскресенье\": \"Выходной\",\n }\n restaurant = Restaurant(\"Снежинка\", work_time, False)\n return restaurant", "def get_all_reservations(config):\n reservations = []\n region_list = regions(aws_access_key_id=config.keys.api,\n 
aws_secret_access_key=config.keys.secret)\n for region in region_list:\n _logger.info(\"Searching %s\", region)\n cnx = region.connect(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for reservation in cnx.get_all_instances():\n _logger.info(\"Found %s %s\", reservation,\n [str(i.id) for i in reservation.instances])\n reservations.append(reservation)\n return reservations", "def get(self, registration):\n request = Car.read(registration)\n return {'status':'success', 'message': 'Fetch successful', 'data': request}", "def request_booking_details(self, poll_url, **params):\n return self.make_request(\"%s/booking\" % poll_url,\n method='put',\n callback=lambda resp: resp.headers['location'],\n **params)", "def get_ride_request(startDate, endDate):\n\n results_list = []\n\n session = DB_SESSION()\n\n results = []\n\n results = session.query(Request).filter(Request.date_created >= startDate, Request.date_created <=endDate)\n\n for result in results:\n results_list.append(result.to_dict())\n print(result.to_dict())\n\n session.close()\n\n return results_list, 200", "def get_free_ip(reservations,node,networkname):\n ips=[]\n iprange=''\n for reservation in sorted(reservations, key=lambda r: r.id, reverse=True):\n if reservation.next_action != \"DEPLOY\":\n continue\n rnetworks = reservation.data_reservation.networks\n for network in rnetworks:\n if network.name == networkname:\n for netres in network.network_resources:\n if netres.node_id == node:\n iprange = netres.iprange\n\n rcontainer = reservation.data_reservation.containers\n for container in rcontainer:\n if container.node_id == node:\n for netcon in container.network_connection:\n if netcon.network_id == networkname:\n ips.append(netcon.ipaddress)\n\n rkubernetes = reservation.data_reservation.kubernetes\n for kubernetes in rkubernetes:\n if kubernetes.node_id == node:\n ips.append(kubernetes.ipaddress)\n\n\n\n # asuming /24 !!\n if iprange == '':\n print(\"error: no network found for:\",networkname)\n sys.exit(1)\n nodenet = iprange[0:-4]\n #search first free IP\n i = 1\n free_ip = ''\n while i<254:\n i+=1\n free_ip = nodenet+str(i)\n if free_ip not in ips:\n break\n # todo: check if free_ip is a valid IP\n return free_ip", "def get_room(self, name):\n for i in self.rooms:\n if self.rooms[i].name == name:\n return self.rooms[i]\n raise RuntimeError, \"Room '%s' not known\" % name", "def get(self, room_id):\n room = redis_store.get(room_id)\n if room is None:\n # raise 404\n raise ResourceDoesNotExist('Resource not found.')\n else:\n room = loads(room)\n return {'host_id': room.get('host_id')}", "def cancel_room():\n try:\n user = User.get_user()\n except ValueError as err:\n return jsonify({\"error\": str(err)})\n\n booking_id = request.form.get(\"booking_id\")\n if not booking_id:\n return jsonify({\"error\": \"No booking id sent to server!\"})\n if \",\" in booking_id:\n return jsonify({\"error\": \"Only one booking may be cancelled at a time.\"})\n\n booking = StudySpacesBooking.query.filter_by(booking_id=booking_id).first()\n if booking:\n if (booking.user is not None) and (booking.user != user.id):\n return jsonify({\"error\": \"Unauthorized: This reservation was booked by someone else.\"}), 400\n if booking.is_cancelled:\n return jsonify({\"error\": \"This reservation has already been cancelled.\"}), 400\n\n if booking_id.isdigit():\n sessionid = request.form.get(\"sessionid\")\n if not sessionid:\n return jsonify({\"error\": \"No session id sent to server.\"}), 400\n try:\n 
wharton.delete_booking(sessionid, booking_id)\n save_wharton_sessionid()\n if booking:\n booking.is_cancelled = True\n sqldb.session.commit()\n else:\n save_booking(\n lid=1,\n email=user.email,\n booking_id=booking_id,\n is_cancelled=True,\n user=user.id\n )\n return jsonify({'result': [{\"booking_id\": booking_id, \"cancelled\": True}]})\n except APIError as e:\n return jsonify({\"error\": str(e)}), 400\n else:\n resp = studyspaces.cancel_room(booking_id)\n if \"error\" not in resp:\n if booking:\n booking.is_cancelled = True\n sqldb.session.commit()\n else:\n save_booking(\n email=user.email,\n booking_id=booking_id,\n is_cancelled=True,\n user=user.id\n )\n return jsonify({'result': resp})", "def handle_find_slot(date=None):\n if not date:\n session.attributes['stage'] = 'book_slot'\n return question('You didn\\'t specify the date. What date you would like to book?')\n else:\n print(date)\n params = {\n 'date': date\n }\n req = requests.get(config.API + '/find_slot', params=params)\n print(req.text)\n freeslots_string = get_time_strings(json.loads(req.text)['freesloats'])\n session.attributes['stage'] = 'find_slot'\n session.attributes['date'] = date\n return question(\n 'The free slots for ' + date + ' are ' + freeslots_string + ' Which one do you want me to book?')", "def FindVehicleInstance(vehicleID, accessToken):\n vehiclesDict = getVehicleDataAsDict()\n if vehicleID in vehiclesDict:\n return toVehicleInstance(vehicleID, vehiclesDict[vehicleID]) # return the vehicle instance (already in data.json)\n else:\n # doesn't exist in data.json, we'll add it into there\n vehicle = smartcar.Vehicle(vehicleID, accessToken)\n vehicleInfo = vehicle.info()\n vehicleOdometer = vehicle.odometer()['data']['distance']\n vehicleLatitude = vehicle.location()['data']['latitude']\n vehicleLongitude = vehicle.location()['data']['longitude']\n newVehicleInstance = Vehicle(vehicleID, vehicleInfo['make'], vehicleInfo['model'], vehicleInfo['year'], [vehicleOdometer], [(vehicleLatitude, vehicleLongitude)], accessToken)\n\n updateDictionary(vehicleID, newVehicleInstance.VehicleToDict())\n\n return newVehicleInstance", "def look_vacant_offices(request):\n if request.GET:\n if request.GET['datetime_from'] and request.GET['datetime_to']:\n offices = NumberOffice.objects.all()\n reservations = Reservation.objects.all()\n post_from = request.GET['datetime_from']\n post_to = request.GET['datetime_to']\n filteroffice = reservations.all().filter(\n datetime_from__gte=post_from, datetime_to__lte=post_to\n )\n reservednumberoffice = set()\n # set reserved office for corect time\n for i in filteroffice:\n reservednumberoffice.add(i.number_office)\n context = {'offices': offices, \"reservednumberoffice\": reservednumberoffice}\n return render(request, 'coworkings/vacant_offices.html', context)\n else:\n text = 'Enter the correct data or fill in all fields.'\n context = {'text': text}\n return render(request, 'coworkings/look_vacant_offices.html', context)\n else:\n return render(request, 'coworkings/look_vacant_offices.html')", "def get(self, vehicle_id):\n vehicle = VehicleServices(public_id=vehicle_id).get_an_item()\n if not vehicle:\n api.abort(404)\n else:\n return vehicle", "def get(self, eventId):\n check_type(eventId, basestring)\n\n # API request\n json_data = self._session.get(API_ENDPOINT + '/' + eventId)\n\n # Return a room object created from the response JSON data\n return self._object_factory(OBJECT_TYPE, json_data)", "def get_queryset(self):\n return Reserva.objects.filter(\n cliente=self.request.user)", 
"def cancel_reservation(payload, clothes_id):\n selection = Reserve.query.filter_by(clothes_id=clothes_id).all()\n # if the given clothes has not been reserved, abort 404\n if len(selection) == 0:\n abort(404)\n # if two or more user reserved the same clothe, abort umprocessable\n if len(selection) >= 2:\n abort(422)\n # check if access user_id matches reservation user_id\n reservation = selection[0]\n # querying who is accessing and check role\n access_user = User.query.filter_by(auth0_id=payload['sub']).first()\n role = access_user.role\n # if user role is \"user\", check if access user_id matches\n # reservation user_id\n reservation_user = reservation.user\n if role == 'user' and access_user.id != reservation_user.id:\n raise AuthError({\n 'code': 'Invalid_claims',\n 'description': 'Unauthorized access by user'\n }, 401)\n\n # query clothes\n clothes = reservation.clothes\n\n # set error status\n error = False\n # cancel that reservation\n try:\n clothes.status = \"\"\n reservation.delete()\n formatted_clothes = clothes.format()\n formatted_user = reservation_user.format()\n except Exception:\n reservation.rollback()\n error = True\n print(sys.exc_info())\n finally:\n reservation.close_session()\n clothes.close_session()\n\n if error:\n abort(422)\n else:\n return jsonify({\n 'success': True,\n 'clothes': formatted_clothes,\n 'user': formatted_user\n })", "def post(self, flight_id):\n data = request.get_json()\n seat = 1\n if data:\n seat = data.get('seat')\n current_user = get_jwt_identity()\n try:\n flight = get_flight(flight_id)\n if not flight:\n return generate_response('Selected flight not available', 400)\n\n if seat == 1 and flight.booked_economy < flight.airplane.economy_seats:\n data = dict(booked_economy=flight.booked_economy+1)\n save_booking(current_user, flight_id)\n flight.update(flight, **data)\n return generate_response('Economy seat flight reservation successfull', 201)\n\n if seat == 2 and flight.booked_business < flight.airplane.business_seats:\n data = dict(booked_business=flight.booked_business+1)\n save_booking(current_user, flight_id)\n flight.update(flight, **data)\n return generate_response('Business seat flight reservation successfull', 201)\n\n except Exception as e:\n db.session.rollback()\n return jsonify({'error': str(e)}), 401", "def get(self, guid):\n key = db.Key.from_path('Task', int(guid))\n task = db.get(key)\n if not task == None:\n guid = \"%s\" % task.key().id_or_name()\n task_json = { \"id\": \"%s\" % guid, \"name\": task.name,\n \"priority\": task.priority, \"effort\": task.effort,\n \"projectId\": task.projectId,\n \"submitterId\": task.submitterId, \"assigneeId\": task.assigneeId,\n \"type\": task.type, \"developmentStatus\": task.developmentStatus,\n \"validation\": task.validation, \"description\": task.description,\n \"createdAt\": task.createdAt,\n \"updatedAt\": task.updatedAt }\n \n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write(simplejson.dumps(task_json))\n else:\n self.response.set_status(404, \"Task not found\")", "def get_room(room_id):\n try:\n room_id = int(room_id)\n room_entry = read_criteria(Room,{\"id\":room_id},session)\n except ValueError:\n room_entry = None\n # if the provided id doesn't match any room in the db, return -1 to indicate not found\n if room_entry is None:\n room = {\"roomId\":-1}\n status_code = 404\n else:\n status_code = 200\n room = room_json(room_entry, session,app.config[\"OFFLINE_TESTING\"], login_session)\n return generate_response(room,status_code)", "def 
consultation_booking_query(self, cid, sid, time, date):\n if not self.check_course_exist(cid):\n return ConsultationError.INVALID_COURSE.value\n is_weekday, feedback = self.check_weekday(date)\n time = self.round_time(time)\n if is_weekday:\n try:\n avail_list = self.get_avail_time_slots(cid.upper(), date) # return available time slot list\n logger.debug(avail_list)\n if time in avail_list:\n self.add_consultation(cid, sid, time, date) # add into database\n self.emailer.send_confirm_booking(cid=cid, time=time, date=date, receiver='whatbot9900@gmail.com')\n return \"{}\".format(feedback)\n else:\n if not avail_list:\n return \"Sorry, there is no available time slot on date\"\n result = \"Sorry this time slot has been booked, \" \\\n \"please choose another one from following time slots on {}\".format(date)\n return '{}: {}'.format(result, ', '.join(avail_list))\n except ValueError:\n logger.error(\"Invalid Input\")\n return\n else:\n logger.debug(feedback)\n return feedback", "def get(congregation_guid):\n try:\n return Congregation.objects.get(guid=congregation_guid)\n except ObjectDoesNotExist:\n url = f'https://apps.jw.org/api/public/meeting-search/weekly-meetings/{congregation_guid}'\n with urllib.request.urlopen(url) as response:\n print(response.read())", "def find_seat(seat_id: SeatID) -> Optional[DbSeat]:\n return DbSeat.query.get(seat_id)", "def _get(self, id_: str) -> Union[DBModelInstance, NoReturn]:\n record = self.model.query.get(id_)\n if record:\n return record\n else:\n # raise error to correct handling wrong inputted params\n raise ServiceBadRequest()", "def form_valid(self, form, **kwargs):\n reservation = Reservation(start_time=form.cleaned_data[\"start_time\"],\n end_time=form.cleaned_data[\"end_time\"], user=self.request.user,\n machine=form.cleaned_data[\"machine\"], comment=form.cleaned_data[\"comment\"])\n\n if form.cleaned_data[\"event\"]:\n reservation.event = form.cleaned_data[\"event\"]\n\n if form.cleaned_data[\"special\"]:\n reservation.special = True\n reservation.special_text = form.cleaned_data[\"special_text\"]\n\n return self.validate_and_save(reservation, form)", "def SaveToReservationSQL(self, order, recipientid):\n\n # get counter, increase it, save counter, and use for reservation\n # managerid, recipientid,\n # insert reservation \\'{reservation_guid}\\\n # insert people\n # insert subclaim\n\n cursor = self.cursor\n\n reservation_guid = order[\"id\"]\n km_number = order[\"crmid\"]\n cursor.execute('select TOP 1 id from reservation where trash=0 and (guid=? or ndog=?) order by id desc',\n (reservation_guid, km_number))\n\n row = cursor.fetchone()\n if (not row):\n reservation_new = 1\n reservationid = None\n else:\n reservation_new = 0\n reservationid = row[0]\n\n # check subclaims\n # reservation_to_delete=row[0]\n # query='select id from subclaim where claimid=?'\n # cursor.execute(query,reservation_to_delete)\n # rows=cursor.fetchall()\n # if rows :\n # query='select number from reservation where id=?'\n # cursor.execute(query,reservation_to_delete)\n # row = cursor.fetchone()\n # self.number = row[0]\n\n # TODO - update existing reservation\n # return 0\n\n # query='update reservation set trash=1 where id=?'\n # cursor.execute(query,reservation_to_delete)\n\n # create reservation if it is missing\n\n if reservation_new == 0:\n\n cursor.execute('select number from reservation where id=? 
and trash=0', reservationid)\n row = cursor.fetchone()\n number = row[0]\n self.number = number\n\n else:\n number = km_number\n self.number = number\n\n print('Dogovor number ', number, 'KM', km_number, 'reservationid ', reservationid)\n\n manager_guid = order[\"manager\"][\"id\"]\n query = f'select id from recipient where guid=\\'{manager_guid}\\''\n cursor.execute(query)\n row = cursor.fetchone()\n humanid = row[0]\n\n guid = order[\"id\"]\n currency = order[\"cruises\"][0][\"currency\"]\n print(currency)\n\n date_created = datetime.fromisoformat(order[\"created\"][:order[\"created\"].find('.')])\n\n query = '''\ninsert into dbo.[reservation]\n([number], [cdate], [recipientid], [humanid], [officeid], [legalid], [statusid],\n [pdate], [currencyid],[ndog],[guid])\nvalues (?,?,?,?,?,?,?,?,?,?,?)\n'''\n\n # TODO officeid by manager, legalid by owner, statusid?\n ## if reservation is not exist create new, else update\n values = (\n km_number, date_created, recipientid, humanid, 29921, 136, 2, date_created, currencymap[currency],\n order[\"crmid\"],\n guid)\n print(values)\n if (reservation_new == 1) and (km_number):\n cursor.execute(query, values)\n cursor.execute(\"select IDENT_CURRENT('reservation')\")\n row = cursor.fetchone()\n id = row[0]\n cursor.execute('exec ChangesLog_AddNew ?,?,?,?,?,?,?,?,?,?,?,?,?', (\n 'robot python', 1, 'reservation', id, km_number, 'reservation', id, str(id), None, None, '', None, ''))\n\n\n elif (reservation_new == 0) and (km_number):\n update_query = \"\"\" update dbo.[reservation] \n set cdate = ?, recipientid=?, humanid = ?, officeid=?, legalid=?, statusid=?, pdate=?, currencyid=?, guid =?, ndog = ? where id=?\"\"\"\n cursor.execute(update_query, (\n date_created, recipientid, humanid, 29921, 136, 2, date_created, currencymap[currency], guid, km_number,\n reservationid))\n id = reservationid\n else:\n id = 0\n return id, reservation_new" ]
[ "0.7379199", "0.71298105", "0.70966446", "0.70289004", "0.6932207", "0.68829936", "0.66138154", "0.65487725", "0.64873415", "0.64082396", "0.6286343", "0.628403", "0.62172484", "0.6214408", "0.61116004", "0.6033761", "0.60133725", "0.5996568", "0.59386927", "0.5936628", "0.5919651", "0.5905735", "0.58974254", "0.58911747", "0.58231586", "0.58035326", "0.5756488", "0.57189953", "0.5649887", "0.563695", "0.5606813", "0.55697876", "0.5567084", "0.5542715", "0.55384", "0.5517696", "0.549399", "0.54875594", "0.5461244", "0.5444479", "0.54409164", "0.54310983", "0.5422673", "0.5400362", "0.53933567", "0.53910035", "0.535832", "0.5337333", "0.5296276", "0.5275404", "0.52744776", "0.5220651", "0.52111685", "0.51995933", "0.5195603", "0.5171114", "0.5171024", "0.51610464", "0.51599437", "0.51503634", "0.51234126", "0.51191175", "0.51087105", "0.5105927", "0.5077729", "0.50733256", "0.5055324", "0.5052101", "0.503482", "0.502737", "0.5027006", "0.50225174", "0.500896", "0.50043267", "0.49962226", "0.4989849", "0.498816", "0.4971391", "0.49668148", "0.49605274", "0.4930028", "0.49213338", "0.49146113", "0.49129832", "0.49062747", "0.4903203", "0.49030453", "0.489337", "0.48818314", "0.4876677", "0.4875419", "0.48677874", "0.48511618", "0.4830212", "0.48264527", "0.48193577", "0.48187226", "0.48178273", "0.48121208", "0.47860092" ]
0.7394642
0
List the inventory of a hotel in a specific date range
async def list_inventory_endpoint(request): hotel_id = request.args["hotel_id"][0] start_date = request.args["start_date"][0] end_date = request.args["end_date"][0] inventory = model.list_inventory(hotel_id, start_date, end_date) if inventory == model.OPERATION_ERROR_RETURN_CODE: return json({"success": False}) return json({"success": True, "inventory": inventory})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inventory(request, concierge, template=\"concierges/inventory_check.html\"):\n inventory = []\n for x in xrange(0, 2):\n day = date.today() + timedelta(days=x)\n dow = DayOfWeek.objects.get(isoweekday=day.isoweekday())\n day_info = {'day': day, 'times': []}\n schedules = dow.tourschedule_set.filter(active=True, tour_type__active=True,\n tour_type__default_site_skin__is_concierge_cta=True).order_by('tour_type__order')\n for sched in schedules:\n product = sched.tour_type.get_product(day, schedule=sched).product\n tour_info = {\n 'day': day,\n 'time': sched.pretty_time,\n 'tour_type': sched.tour_type,\n 'seats_available': Decimal(product.items_in_stock) - Decimal(product.total_sold)\n }\n day_info['times'].append(tour_info)\n\n inventory.append(day_info)\n\n ctx = RequestContext(request, {\n 'concierge': concierge,\n 'inventory': inventory\n })\n\n return render_to_response(template, context_instance=ctx)", "def get_date_range():\n start_date = request.args.get(\"start\", default=None, type=str)\n start_date = datetime.datetime.fromisoformat(start_date)\n end_date = request.args.get(\"end\", default=None, type=str)\n end_date = datetime.datetime.fromisoformat(end_date)\n\n animals = []\n for key in rd.keys(\"*\"):\n animal = json.loads(rd.get(key))\n if (\n start_date\n <= datetime.datetime.fromisoformat(animal[\"created-on\"])\n <= end_date\n ):\n animals.append(animal)\n\n return jsonify(animals)", "def get_slots_for_date(url: str, session: requests.Session) -> List[Dict]:\n response = session.get(\n url,\n headers={\n \"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"Adrum\": \"isAjax:true\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n },\n )\n\n slots = list(\n filter(lambda item: item[\"status\"] != \"UnAvailable\", response.json()[\"slots\"])\n )\n\n return slots", "def list_inventory(self):\n\n print('Your inventory contains:')\n #i = 1\n #inv_dict = {}\n for item in self.bag_of_holding:\n if 'casted' not in item.name:\n try:\n print(item.name)\n except:\n pass\n\n #inv_dict[str(i)] = item\n #i += 1\n #return inv_dict", "def get_queryset(self): # NOQA\n rates = Rate.objects.filter(\n inventory__date__gte=self.kwargs.get('checkin'),\n inventory__date__lte=self.kwargs.get('checkout'),\n room__hotel__public_id=self.kwargs.get('hotel_id')\n ).values('room__public_id', 'price', \"inventory__date\")\n return rates", "def _input_date(stock_list: 'a namedtuple') -> list:\n\n stock_info = stock_list\n\n format_date = '%Y-%m-%d'\n start_date = datetime.strptime(input('Start Date: ').strip(), format_date)\n end_date = datetime.strptime(input('End Date: ').strip(), format_date)\n\n result_stock_list = []\n for stock in stock_info:\n date = datetime.strptime(stock.date, format_date)\n if start_date <= date <= end_date:\n result_stock_list.append(stock)\n result_stock_list.sort()\n return result_stock_list", "def get_reservations():\n start_date = request.args.get('start')\n end_date = request.args.get('end')\n\n if start_date is not None and end_date is not None:\n start = parse_datetime(request.json['start'])\n end = parse_datetime(request.json['end'])\n if start is None or end is None:\n abort(400, 'cannot parse start or end date')\n\n reservations = Reservation.query.filter(\n Reservation.end >= start, Reservation.start <= end)\n else:\n reservations = Reservation.query.filter(\n or_(Reservation.start >= datetime.datetime.now(),\n Reservation.end >= datetime.datetime.now()))\n\n reservations = map(lambda x: x.as_dict(), reservations)\n\n return 
json.dumps(reservations)", "def test_get_dealer_historical_inventory(self):\n pass", "def get_ride_report(startDate, endDate):\n\n results_list = []\n\n session = DB_SESSION()\n\n results = []\n\n results = session.query(Report).filter(Report.date_created>=startDate, Report.date_created<=endDate)\n\n for result in results:\n results_list.append(result.to_dict())\n print(result.to_dict())\n\n session.close()\n\n return results_list, 200", "def vaccinations(self, from_date: str, to_date: str) -> VaccinationList:\n params = {'date_from': from_date, 'date_to': to_date}\n data = self.get(\"mdg_emvolio\", params=params)\n\n ls = [Vaccination(**area) for area in data]\n return VaccinationList(items=ls)", "def get_items_sold_between(table, month_from, day_from, year_from, month_to, day_to, year_to):\n\n items_sold_between = []\n index = 0\n start_date = str(year_from) + str(month_from) + str(day_from)\n end_date = str(year_to) + str(month_to) + str(day_to)\n for record in table:\n if end_date > record[-1] > start_date:\n items_sold_between.append(record)\n\n return items_sold_between", "def fetch_daterange(self, start_date, end_date=None, table='fashion'):\n\n if end_date is None:\n end_date = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')\n\n end_date_obj = datetime.strptime(end_date, '%Y-%m-%d %H:%M:%S')\n end_day = '{:04d}-{:02d}-{:02d}'.format(end_date_obj.year, \n end_date_obj.month, \n end_date_obj.day)\n\n start_date_obj = datetime.strptime(start_date, '%Y-%m-%d %H:%M:%S')\n curr_day = '{:04d}-{:02d}-{:02d}'.format(start_date_obj.year, \n start_date_obj.month, \n start_date_obj.day)\n \n record_lookup_stmt = \"SELECT * FROM {} WHERE date=%s AND t>%s and t<%s\".format(table)\n \n record_list = []\n while curr_day <= end_day: \n record_list += self.session.execute(record_lookup_stmt, [curr_day, \n start_date,\n end_date])\n start_date_obj += timedelta(days=1)\n curr_day = '{:04d}-{:02d}-{:02d}'.format(start_date_obj.year, \n start_date_obj.month, \n start_date_obj.day) \n\n return record_list", "def date_range(start, end):\n session = Session(engine)\n \n sel = [func.min(measurement.tobs),\n func.max(measurement.tobs),\n func.avg(measurement.tobs)]\n \n range_data = session.query(*sel).\\\n filter(measurement.date >= start).\\\n filter(measurement.date <= end).all()\n \n session.close()\n \n range_x = list(np.ravel(range_data))\n\n return jsonify(range_x)", "def date_range(start_date, end_date):\n list_dates = []\n for n in range((end_date + timedelta(1) - start_date).days):\n temp_date = start_date + timedelta(n)\n list_dates.append(temp_date.strftime('%Y%m%d'))\n return list_dates", "def date_range(start_date, end_date):\n list_dates = []\n for n in range((end_date + timedelta(1) - start_date).days):\n temp_date = start_date + timedelta(n)\n list_dates.append(temp_date.strftime('%Y%m%d'))\n return list_dates", "def temp_range(start_date, end_date):\n \"\"\"for dates between the start and end date inclusive.\"\"\"\n results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()\n\n # Convert list of tuples into normal list\n startend = list(np.ravel(results))\n\n return jsonify(startend)", "def getPurchaseDates(self):\n\t\treturn self.dateList", "def get_daily_list(context, data_dict):\n # noinspection PyUnresolvedReferences\n\n output = []\n start_date_str = _get_or_bust(data_dict, 'startDate')\n try:\n dt.strptime(start_date_str, '%Y-%m-%d')\n except 
ValueError:\n raise _ValidationError(\n 'startDate \\'{0}\\' not in YYYY-MM-DD format'.format(start_date_str)\n )\n start_date = parse(start_date_str,\n default=default_release_date).astimezone(gettz('UTC'))\n\n if 'endDate' in data_dict:\n end_date_str = data_dict['endDate']\n try:\n dt.strptime(end_date_str, '%Y-%m-%d')\n except ValueError:\n raise _ValidationError(\n 'endDate \\'{0}\\' not in YYYY-MM-DD format'.format(end_date_str)\n )\n end_date = parse(end_date_str,\n default=default_release_date).astimezone(gettz('UTC'))\n days = (end_date - start_date).days + 1\n if days < 1:\n raise _ValidationError(_(\n 'endDate \\'{0}\\' must be greater '\n 'than startDate \\'{1}\\''.format(\n end_date_str,\n start_date_str\n )\n ))\n else:\n days = 1\n\n for day in range(days):\n single_date = (start_date + datetime.timedelta(days=day))\n single_date_str = single_date.replace(tzinfo=None).isoformat()\n q = {\n 'q': (\n 'product_type_code:24 AND '\n 'last_release_date:\"{release_date}Z\"'.format(\n release_date=single_date_str\n )\n )\n }\n\n results = _get_action('package_search')(context, q)\n\n count = results['count']\n if count > 1:\n raise _ValidationError(\n 'More than one Daily for date \\'{0}\\''.format(single_date_str)\n )\n\n for result in results['results']:\n children = []\n\n for child in result.get('child_list', []):\n children.append(\n get_product(context, {\n 'productId': child\n })\n )\n\n result['children'] = children\n output.append(result)\n\n return output", "def get_items_sold_between(table, month_from, day_from, year_from, month_to, day_to, year_to):\n\n min_date = common.dtime(year_from, month_from, day_from)\n max_date = common.dtime(year_to, month_to, day_to)\n\n return [[line[ID], line[TITLE], int(line[PRICE]), int(line[MONTH]), int(line[DAY]), int(line[YEAR])]\n for line in table if min_date < common.dtime(line[YEAR], line[MONTH], line[DAY]) < max_date]", "def range_date():\n # Query all stations within a certain range\n data = [Measurement.date, func.max(Measurement.tobs), func.min(Measurement.tobs), func.avg(Measurement.tobs)]\n qry = session.query(*data).filter(Measurement.date.between('2014-01-17', '2017-01-01')).all()\n before_date = list(np.ravel(qry))\n\n return jsonify(before_date)", "def create_date_list(start_date = start_date, end_date = end_date):", "def list(self, request):\n currentYear = datetime.now().year\n expenses = Expenses.objects.filter(\n date_purchased__contains=currentYear)\n serializer = ExpenseSerializer(\n expenses, many=True, context={'request': request})\n return Response(serializer.data)", "def find_by_date():\n\n input_date = request.args.get('date')\n \n user_id = session['user']\n user_inv = (UserInv.query.filter_by(user_id=user_id)).all()\n\n inv_by_date = []\n\n for item in user_inv: \n if str(item.inv.date_of_investment) == input_date:\n inv_by_date.append({\"company\": item.inv.company_name, \n \"quantity\": item.inv.quantity, \n \"cost\": item.inv.cost})\n print inv_by_date\n\n return jsonify(inv_by_date)", "def getListIngr(cls):\n\n # meals = Meals.getMealsByFutureDate(user=session['User'])\n list_ingr = db.session.query(RecipeIngredient).join(Recipe).join(Meals).\\\n join(Ingredient).\\\n filter(func.substr(Meals.date_planned,0,11) >= func.substr(datetime.today(),0,11)).\\\n filter(Meals.recipe_fk==Recipe.recipe_id).\\\n filter(Recipe.recipe_id==RecipeIngredient.recipe_fk).\\\n filter(RecipeIngredient.ingredient_name==Ingredient.name).\\\n filter(Meals.user_fk==session['User']).\\\n order_by(Meals.date_planned).all()\n\n 
return list_ingr", "def list_inventory():\n res = {}\n offers = Offer.query.order_by(Offer.offer_id).all()\n for offer in offers:\n res[offer.offer_id] = {'total': count_total_goods(offer.offer_id), 'unallocated': count_available_goods(offer.offer_id)}\n return res", "def planets_in_range(self):\n\n query_string = \"SELECT * from planets_in_range;\"\n\n # Perform query\n self.conn_cur.execute(query_string)\n results = self.conn_cur.fetchall()\n\n # Build dictionary\n ranges = {}\n for row in results:\n ranges[row[0]] = row[1]\n\n return ranges", "def get_ride_request(startDate, endDate):\n\n results_list = []\n\n session = DB_SESSION()\n\n results = []\n\n results = session.query(Request).filter(Request.date_created >= startDate, Request.date_created <=endDate)\n\n for result in results:\n results_list.append(result.to_dict())\n print(result.to_dict())\n\n session.close()\n\n return results_list, 200", "def zenith_range_dates(list_dates, timeframe):\r\n\r\n\tzeniths = []\r\n\r\n\tfor date in list_dates:\r\n\t\tsolar_noon = l.solar_noon(date=date, local=True)\r\n\t\tsolar_zenith = l.solar_elevation(solar_noon.replace(tzinfo=None))\r\n\t\tzeniths.append(solar_zenith)\r\n\r\n\tlist_dates = [date.isoformat() for date in list_dates]\r\n\r\n\tif timeframe == 'last_seven_days' or timeframe == 'this_month' or timeframe == 'last_month':\r\n\t\tformat = 'M/D'\r\n\telif timeframe == 'this_year' or timeframe == 'last_year':\r\n\t\tformat = 'MMM D'\r\n\r\n\treturn {'labels': list_dates, 'data': zeniths, 'yAxisLabel': 'Solar Zenith', 'format': format}", "def visitRange(self, date):\n raise NotImplementedError()", "def _filter_by_date(from_date, until_date):\n qlist = []\n\n if from_date:\n qlist.append(Q(oai_date_stamp__gte=from_date))\n\n if until_date:\n qlist.append(Q(oai_date_stamp__lte=until_date))\n\n return qlist", "def temp_daterange(start_date,end_date):\r\n # Query\r\n mam_temp_dr_results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\r\n filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()\r\n \r\n # Convert results into a list of min, ave, max temps for date range with specific start_date and end_date\r\n mam_temp_start_end = list(np.ravel(mam_temp_dr_results))\r\n return jsonify(mam_temp_start_end)", "def list(self, request, *args, **kwargs):\n data = self.process_query_params()\n if data:\n self.currency_client.get_exchange_rates_by_date_range(**data)\n return super().list(request, *args, **kwargs)", "def look_vacant_offices(request):\n if request.GET:\n if request.GET['datetime_from'] and request.GET['datetime_to']:\n offices = NumberOffice.objects.all()\n reservations = Reservation.objects.all()\n post_from = request.GET['datetime_from']\n post_to = request.GET['datetime_to']\n filteroffice = reservations.all().filter(\n datetime_from__gte=post_from, datetime_to__lte=post_to\n )\n reservednumberoffice = set()\n # set reserved office for corect time\n for i in filteroffice:\n reservednumberoffice.add(i.number_office)\n context = {'offices': offices, \"reservednumberoffice\": reservednumberoffice}\n return render(request, 'coworkings/vacant_offices.html', context)\n else:\n text = 'Enter the correct data or fill in all fields.'\n context = {'text': text}\n return render(request, 'coworkings/look_vacant_offices.html', context)\n else:\n return render(request, 'coworkings/look_vacant_offices.html')", "def load_schedules(self, from_date=None, to_date=None, supplier='All', day=None):\n 
logger.info('SchedulePurchase loading purchase schedules initiated')\n data = []\n try:\n with Transaction().start(DBNAME, 1):\n if not day:\n dataobj = self.ob.calculate_requirement(from_date, to_date)\n else:\n dataobj = self.ob.update_ingredients(day)\n for i, j in dataobj.iteritems():\n if j[1] <= 0:\n continue\n dictionary = {}\n # Product = Model.get('product.product')\n if supplier == 'All':\n product = self.Product.search([('name', '=', i),\n ('description', '=', 'Stock'),\n ('type', '=', 'goods')])\n else:\n product = self.Product.search([('name', '=', i),\n ('product_suppliers', '=', supplier),\n ('description', '=', 'Stock'),\n ('type', '=', 'goods')])\n product = product[-1] if product else None\n if product:\n dictionary['code'] = product.code\n dictionary['item'] = product.template.name\n dictionary['category'] = product.template.category.name\n dictionary['unit'] = j[0].name\n dictionary['quantity'] = j[1].quantize(Decimal('0.11')).to_eng()\n suppliers = product.template.product_suppliers\n if suppliers:\n dictionary['supplier'] = suppliers[0].party.name\n data.append(dictionary)\n else:\n pass\n return data\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return data", "def test_get_pricehistory_defining_start_and_end_date(self):\n # Change the creation date of the product to 01.01.2019\n dt = datetime.strptime(\"01.01.2019\", \"%d.%m.%Y\")\n Product.query.filter_by(id=1).first().creation_date = dt\n ProductPrice.query.filter_by(product_id=1).first().timestamp = dt\n db.session.commit()\n\n # Insert a pricehistory\n timestamps = [\"02.01.2019\", \"03.01.2019\", \"08.01.2019\", \"10.01.2019\"]\n self.insert_pricehistory(timestamps)\n\n # Query all entries from the 02.01.19 to 08.01.19\n start = int(datetime(year=2019, month=1, day=2).timestamp())\n end = int(datetime(year=2019, month=1, day=8).timestamp())\n url = f\"/products/1/pricehistory?start_date={start}&end_date={end}\"\n res = self.get(url=url, role=\"admin\")\n pricehistory = json.loads(res.data)\n # There should be only the entries [02.01.19, 03.01.19 and 08.01.19]\n self.assertEqual(len(pricehistory), 3)", "def ships_in_range(self):\n\n query_string = \"SELECT * from ships_in_range;\"\n\n # Perform query\n self.conn_cur.execute(query_string)\n results = self.conn_cur.fetchall()\n\n # Build dictionary\n ranges = {}\n for row in results:\n ranges[row[0]] = row[1]\n\n return ranges", "def range_temp(start,end):\n year, month, date = map(int, start.split('-'))\n date_start = dt.date(year,month,day)\n year2, month2, date2 = map(int, end.split('-'))\n date_end = dt.date(year2,month2,day2)\n # Query for tobs for definied date range\n results = session.query(func.min(Measurement.tobs),func.max(Measurement.tobs).\\\n func.avg(Measurement.tobs)).filter(Measurement.date >= date_start).filter(Measurement.date <= date_end).all()\n data = list(np.ravel(results))\n return jsonify(data)", "def show_inventory(lst_Inventory):\r\n \r\n print('======= The Current Inventory: =======')\r\n print('ID\\tCD Title (by: Artist)\\n')\r\n for row in lst_Inventory:\r\n print('{}\\t{} (by:{})'.format(cd_instance.cd_id, cd_instance.cd_title, cd_instance.cd_artist))\r\n print('======================================')", "def start_end(start_date,end_date):\n\n session = Session(engine)\n\n # Query from database full temp results for dates range\n temp_results = session.query(func.min(measurement.tobs), func.avg(measurement.tobs), func.max(measurement.tobs)).\\\n filter(measurement.date >= start_date).\\\n 
filter(measurement.date <= end_date).all()\n \n session.close() \n \n return jsonify(temp_results)", "def get_shipments_by_date(auth, date, base_url='https://api.cratejoy.com/v1/'):\n \n shipment_endpoint = '{}shipments/?batch.end__lt={}T00:00:00Z'.format(base_url, date)\n\n resp = requests.get(\n shipment_endpoint,\n auth=auth\n )\n\n print('GET request to {} responded with status '\n 'code: {}'.format(shipment_endpoint,\n resp.status_code))\n print(resp.content)", "def currentList(inputSpreadsheet,inputRange):\n service = getService()\n\n readSheet = service.spreadsheets()\n result = readSheet.values().get(spreadsheetId=inputSpreadsheet,\n range=inputRange).execute()\n volunteerList = result.get('values', [])\n volunteers = []\n for sublist in volunteerList:\n for volunteer in sublist:\n volunteers.append(volunteer)\n return volunteers;", "def find_within_dates(self,\r\n datefrom=(1,1,1),\r\n dateto=(3000,12,31),\r\n withinrange=None,\r\n orequal=False,\r\n most_recent=False):\r\n\r\n def convert (date):\r\n\r\n if isinstance(date,str):\r\n #If input is a string convert to a tuple\r\n date += '-01-01'\r\n date = datefrom.split(DASH)\r\n year, month, day = date[0].replace(PLUS,DASH), date[1], date[2]\r\n date = int(year), int(month), int(day)\r\n if isinstance(date, (list,tuple)):\r\n #If a tuple, convert to a datetime object\r\n date = datetime.datetime(date[0],date[1],date[2])\r\n return date\r\n\r\n if withinrange is None:\r\n #If not range assigned, default to all indexes\r\n withinrange = self.indexes()\r\n\r\n datefrom = convert(datefrom)\r\n dateto = convert(dateto)\r\n\r\n\r\n if not orequal:\r\n return [a_temp for a_temp in withinrange\r\n if self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True)> datefrom\r\n and self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) < dateto]\r\n return [a_temp for a_temp in withinrange\r\n if self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) >= datefrom and\r\n self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) <= dateto]", "def temp_range_start(start):\n # Create our session (link) from Python to the DB\n session = Session(engine)\n start_list = []\n results = session.query( Measurement.date,\\\n func.min(Measurement.tobs), \\\n func.avg(Measurement.tobs), \\\n func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).\\\n group_by(Measurement.date).all()\n for date, min, avg, max in results:\n new_dict = {}\n new_dict[\"Date\"] = date\n new_dict[\"Min_Temp\"] = min\n new_dict[\"Avg_Temp\"] = avg\n new_dict[\"Max_Temp\"] = max\n start_list.append(new_dict)\n session.close() \n return jsonify(start_list)", "def _getAllMinistries(date):\n session = Session()\n mfilter=sql.or_( \n sql.between(date, schema.groups.c.start_date, schema.groups.c.end_date),\n sql.and_(\n (schema.groups.c.start_date < date ),\n (schema.groups.c.end_date == None)\n )\n )\n query = session.query(domain.Ministry).filter(mfilter)\n return query.all()", "def filter_meetings_by_date(self, start_date, end_date):\n db_connection = DbConnection()\n\n try:\n connection = db_connection.get_connection()\n\n cursor = connection.cursor()\n cursor.execute(self.select_sql, (start_date, end_date))\n rows = cursor.fetchall()\n\n cursor.close()\n db_connection.close_connection()\n except Exception:\n raise\n\n else:\n\n return rows", "def view_reservations(request):\n if not 
request.user.is_superuser:\n messages.error(request, \"Sorry, you don't have access to this \\\n part of the site.\")\n return redirect(reverse('home'))\n\n reservations = Reservation.objects.all()\n\n past_reservations = []\n upcoming_reservations = []\n arrivals_today = []\n arrivals_next = []\n departures = []\n inhouse_guests = []\n for reservation in reservations:\n for item in reservation.lineitems.all():\n if item.check_in and item.check_out < datetime.today().date():\n if reservation not in past_reservations:\n past_reservations.append(reservation)\n elif item.check_in > datetime.today().date():\n if reservation not in upcoming_reservations:\n upcoming_reservations.append(reservation)\n\n for reservation in reservations:\n for item in reservation.lineitems.all():\n if item.check_in == datetime.today().date():\n if reservation not in arrivals_today:\n arrivals_today.append(reservation)\n elif item.check_out == datetime.today().date():\n if reservation not in departures:\n departures.append(reservation)\n elif item.check_in < datetime.today().date() and item.check_out > datetime.today().date():\n if reservation not in inhouse_guests:\n inhouse_guests.append(reservation)\n elif item.check_in > datetime.today().date() and item.check_in < datetime.today().date() + timedelta(days=7):\n if reservation not in arrivals_next:\n arrivals_next.append(reservation)\n\n template = 'profiles/reservations.html'\n context = {\n \"reservations\": reservations,\n \"past_reservations\": past_reservations,\n \"upcoming_reservations\": upcoming_reservations,\n \"arrivals_today\": arrivals_today,\n \"arrivals_next\": arrivals_next,\n \"departures\": departures,\n \"inhouse_guests\": inhouse_guests,\n }\n\n return render(request, template, context)", "def fetch_menus(start_date):\n end_date = start_date + timedelta(days=6)\n resp = {\"start_date\": start_date, \"data\": []}\n while start_date < end_date:\n resp[\"data\"].append(fetch_menu(start_date))\n start_date += timedelta(days=1)\n\n return resp", "def test_get_inventory_list(self):\n resp = self.app.get('/inventories')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n self.assertEqual(len(data), 2)", "def get_events(start, end, filters=None):\n\n from frappe.desk.calendar import get_event_conditions\n conditions = get_event_conditions('Booking Request', filters)\n\n data = frappe.db.sql(\"\"\"\n SELECT\n `tabBooking Request`.name, `tabBooking Request`.eta_date,\n `tabBooking Request`.etd_date, `tabBooking Request`.status, `tabBooking Request Status`.color\n FROM\n `tabBooking Request`\n LEFT JOIN `tabBooking Request Status` ON `tabBooking Request`.status = `tabBooking Request Status`.name\n WHERE\n (`tabBooking Request`.docstatus < 2)\"\"\", as_dict=True)\n\n return data", "def get_invoices(self, since, until):\n return self._request('getInvoices', data={\n 'date_from': since.strftime('%d/%m/%Y'),\n 'date_to': until.strftime('%d/%m/%Y')\n })", "def list_availability_definition(self):\n return self._get(path='availability')", "def get_available_items(table):\n\n list_of_items = []\n\n for i in range(len(table)):\n\n table[i][3] = int(table[i][3])\n table[i][4] = int(table[i][4])\n expiration_date = table[i][3] + table[i][4]\n durability = 2017 - expiration_date\n\n if durability <= 0:\n list_of_items.append(table[i])\n\n return list_of_items", "def filter_by_date(items, start_time, end_time=None):\n start_time = parser.parse(start_time + \"UTC\").timestamp()\n if end_time:\n end_time = parser.parse(end_time + 
\"UTC\").timestamp()\n else:\n end_time = time.time()\n\n filtered_items = []\n for item in items:\n if 'time' in item:\n item_time = item['time']\n elif 'timestamp' in item:\n item_time = item['timestamp']\n timestamp = parser.parse(item_time + \"UTC\").timestamp()\n if end_time > timestamp > start_time:\n filtered_items.append(item)\n\n return filtered_items", "def date_range(start_date, end_date):\n return [start_date + timedelta(x) for x in range((end_date - start_date).days + 1)]", "def test_get_pricehistory_end_before_start(self):\n url = \"/products/1/pricehistory?start_date=1000&end_date=900\"\n res = self.get(url=url, role=\"admin\")\n self.assertException(res, exc.InvalidData)", "def prepare_data_with_location(self,from_date,to_date,locations,all_products):\n data_dict = {}\n stock_quant_obj=self.env['stock.quant']\n for loc in locations:\n all_locations = self.get_all_locations(warehouse=False, location=loc)\n if not all_locations:\n continue\n #here we are finding the opening stock for these we are using base query\n #of inventory at date v10\n result = self.get_product_qty(all_locations,from_date)\n qty_dict = dict((x,y) for x, y in result)\n \n for product in all_products:\n last_sales = ''\n qty_purchase_in_duration = 0\n qty_sales_in_duration = 0\n last_purchase_date = ''\n scrap_location_qty = 0\n adjusted_qty_in_duration = 0\n warehouse_out_qty = 0\n warehouse_in_qty = 0\n# here from result of inventory at date we are seaching for specific product.\n opening_product_qty = qty_dict.get(product.id)\n\n #finding last sales qty\n last_sales = self.find_last_sales_qty(from_date,to_date,False,all_locations,product)\n #finding last purchase date of product\n last_purchase_date = self.find_last_purchase_date(from_date,to_date,all_locations,product)\n #fiding date purchase qty in duration for specific product\n qty_purchase_in_duration = self.find_purchase_qty_in_duration(from_date,to_date,all_locations,product)\n #fiding scrap qty of precific product\n scrap_location_qty = self.find_scap_location_qty(from_date,to_date,product,all_locations)\n #finding sales qty in duration\n qty_sales_in_duration = self.find_sale_qty_in_duration(from_date,to_date,False,all_locations,product)\n #fidning adjusted qty in duration\n adjusted_qty_in_duration = self.find_adjusted_qty_in_duration(from_date, to_date, product, all_locations)\n\n # dest_location_lst = self.get_other_wahouse_locations(warehouse)\n \n # if any(all_locations) and any(dest_location_lst):\n # #fidning warehouse in qty \n # warehouse_in_qty = self.find_warehouse_transer_in_qty(product, all_locations, dest_location_lst,from_date,to_date)\n # #fidning warehouse out qty for specific product.\n # warehouse_out_qty = self.find_warehouse_transer_out_qty(product, all_locations, dest_location_lst,from_date,to_date)\n \n # if warehouse_out_qty:\n # warehouse_out_qty = warehouse_out_qty and warehouse_out_qty[0][0] or ''\n # if warehouse_in_qty:\n # warehouse_in_qty = warehouse_in_qty and warehouse_in_qty[0][0] or ''\n \n if adjusted_qty_in_duration:\n adjusted_qty_in_duration = adjusted_qty_in_duration and adjusted_qty_in_duration[0][0] or '' \n if scrap_location_qty:\n scrap_location_qty = scrap_location_qty and scrap_location_qty[0][0] or ''\n \n # if qty_sales_in_duration:\n # qty_sales_in_duration = qty_sales_in_duration and qty_sales_in_duration[0][0] or ''\n # if qty_purchase_in_duration:\n # qty_purchase_in_duration = qty_purchase_in_duration or ''\n if last_sales:\n last_sales = datetime.strptime(last_sales and last_sales[0][0], 
'%Y-%m-%d %H:%M:%S').strftime('%d-%m-%Y') or ''\n \n if last_purchase_date:\n last_purchase_date = datetime.strptime(last_purchase_date and last_purchase_date[0][0], '%Y-%m-%d %H:%M:%S').strftime('%d-%m-%Y') or ''\n \n if data_dict.has_key(loc.id):\n data_lst=data_dict.get(loc.id)\n data_lst.append({'product':product,'sku':product.default_code or '','name':product.name,\n 'Cost':product.standard_price or '','sales_price':product.lst_price or '',\n 'opening_qty':opening_product_qty or 0,'last_sales':last_sales or '',\n 'last_purchase_date':last_purchase_date or '','qty_purchase_in_duration':qty_purchase_in_duration or 0,\n 'qty_sales_in_duration': qty_sales_in_duration or 0,'scrap_location_qty':scrap_location_qty or 0,\n 'adjusted_qty_in_duration':adjusted_qty_in_duration or 0\n ,'warehouse_in_qty':warehouse_in_qty or 0,\n 'warehouse_out_qty':warehouse_out_qty or 0 \n })\n data_dict.update({loc.id:data_lst})\n continue\n data_dict.update({loc.id:[{'product':product,'sku':product.default_code or '','name':product.name,\n 'Cost':product.standard_price or '','sales_price':product.lst_price or '',\n 'opening_qty':opening_product_qty or 0,\n 'last_sales':last_sales or '','last_purchase_date':last_purchase_date or '',\n 'qty_purchase_in_duration':qty_purchase_in_duration or 0,\n 'qty_sales_in_duration': qty_sales_in_duration or 0,\n 'scrap_location_qty':scrap_location_qty or 0,\n 'adjusted_qty_in_duration':adjusted_qty_in_duration or 0,\n 'warehouse_in_qty':warehouse_in_qty or 0,\n 'warehouse_out_qty':warehouse_out_qty or 0\n }]})\n return data_dict", "def date_range(start, end):\n \"\"\"between the start and end date inclusive.\"\"\"\n # Create a link to the session\n session = Session(engine)\n \n # Get the start and end date of the data\n final_date = session.query(Measurements.date).order_by(Measurements.date.desc()).first()[0]\n first_date = session.query(Measurements.date).order_by(Measurements.date.asc()).first()[0]\n \n # Make sure dates are in range of available data\n if (start > final_date) or (start < first_date) or (end > final_date) or (end < first_date) or (start>end):\n return f\"{start} - {end} is not a proper date range.</br>Try dates between {first_date} - {final_date}\"\n\n # Query the min, avg, and max temps for the given timeframe\n results = []\n while start <= end:\n min_temp = session.query(func.min(Measurements.tobs)).filter(Measurements.date==start).first()[0]\n avg_temp = session.query(func.avg(Measurements.tobs)).filter(Measurements.date==start).first()[0]\n max_temp = session.query(func.max(Measurements.tobs)).filter(Measurements.date==start).first()[0]\n \n # Store the information retrieved\n results.append([start, min_temp, avg_temp, max_temp])\n \n # Update the date to check the next record\n date1 = start.split(\"-\")\n date1 = dt.date(int(date1[0]), int(date1[1]), int(date1[2])) + dt.timedelta(days=1)\n start = date1.strftime(\"%Y-%m-%d\")\n\n session.close()\n\n # Create a dictionary from the query results\n date_temps = []\n for date, min_temp, avg_temp, max_temp in results:\n date_temps_dict = {}\n date_temps_dict[\"date\"] = date\n date_temps_dict[\"min_temp\"] = min_temp\n date_temps_dict[\"avg_temp\"] = round(avg_temp, 2)\n date_temps_dict[\"max_temp\"] = max_temp\n date_temps.append(date_temps_dict)\n \n return jsonify(date_temps)", "def test_date_rage(self):\n\n query_params = {\n 'until_date': self.today,\n 'from_date': self.today,\n }\n search = OrderSearchEngine()\n query = search.filter_query(query_params)\n content = 
Q(created_at__range=[self.from_date, self.until_date])\n self.assertEqual(str(query), str(content))", "def movements(self, product_uuid, from_date, to_date=date.today(), limit=25, offset=0):\n params = {\n 'fromDate': from_date.strftime('%d/%m/%Y'),\n 'toDate': to_date.strftime('%d/%m/%Y'),\n 'limit': limit,\n 'offset': offset\n }\n response = requests.get(self._url(self._MOVEMENTS_PATH % product_uuid), params=params, headers=self._headers)\n return response.json()", "async def get_hotels(query: str, limit: int, lang: str) -> list:\n data = {'query': query,\n 'lang': lang,\n 'lookFor': 'hotel',\n 'limit': limit}\n resp = await booking_instance.get_hotels(data)\n results = resp.get('results')\n return results.get('hotels')", "def search_by_date_range(self, tl):\n print(\"Search by date range\")\n dates = input(\"Please use YYYYMMDD-YYYYMMDD for date range: \")\n date1_str, date2_str = dates.split('-')\n try:\n date1 = datetime.datetime.strptime(date1_str, utils.fmt)\n date2 = datetime.datetime.strptime(date2_str, utils.fmt)\n except ValueError as err:\n utils.print_error(err)\n return self.search_by_date_range(tl)\n else:\n return tl.findall_date_range(date1, date2)", "def _DateRangeQuery(self, start_date='2007-01-01', end_date='2007-07-01'):\n\n print 'Date range query for events on Primary Calendar: %s to %s' % (\n start_date, end_date,)\n query = gdata.calendar.client.CalendarEventQuery(start_min=start_date, start_max=end_date)\n feed = self.cal_client.GetCalendarEventFeed(q=query)\n for i, an_event in zip(xrange(len(feed.entry)), feed.entry):\n print '\\t%s. %s' % (i, an_event.title.text,)\n for a_when in an_event.when:\n print '\\t\\tStart time: %s' % (a_when.start,)\n print '\\t\\tEnd time: %s' % (a_when.end,)", "def invoice_items(self,org_id=None,query={}):\n if org_id is None:\n org_id = self.org_id\n query_end_date = datetime.strptime(query['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n # Given a 'query_end_date' to find the invoice containing the\n # line items for that date we need to find the invoice which \n # has 'endDate' equal to the end of the month of the `query_end_date`\n query_first_next_month = query_end_date + relativedelta(months=+1) - relativedelta(days=(query_end_date.day-1))\n target_invoices = []\n invoices = self.invoices(org_id)\n if self.verbose:\n print('Searching invoices org_id={}'.format(org_id))\n print('query={} query_end_date={}'.format(query,query_end_date))\n print('Result keys: {}'.format( invoices['content'].keys() ))\n print('Total result count: {}'.format( invoices['content']['totalCount'] ))\n for invoice in invoices['content']['results']:\n #pprint.pprint(invoice)\n end_date = datetime.strptime(invoice['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n if self.verbose: \n print('invoice({})[\\'endDate\\']={} end_date={}'.format(invoice['id'],invoice['endDate'],end_date))\n if end_date == query_first_next_month:\n target_invoices.append(invoice)\n \n if self.verbose: \n print('Target invoices: {}'.format(target_invoices))\n \n\n target_line_items = []\n for invoice in target_invoices:\n invoice_details = self.invoices(org_id,invoice['id']) \n print('invoice_details: {}'.format(invoice_details))\n for item in invoice_details['content']['lineItems']:\n end_date = datetime.strptime(item['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n if end_date == query_end_date:\n target_line_items.append(item)\n if self.verbose:\n print('target_line_items: {}'.format(target_line_items)) \n return target_line_items", "def run(self):\n ilist = []\n key_filter = filters[self.args['filter_group']]\n for 
item in self.client.describe_instances()['Reservations']:\n for instance in item['Instances']:\n idict = {}\n for tag in instance['Tags']:\n if not any(t['Key'] == 'Name' for t in instance['Tags']):\n tag['Value'] = 'Unnamed'\n idict['Name'] = tag['Value']\n if tag['Key'] == 'Name':\n if tag['Value'] == \"\":\n tag['Value'] = 'Unnamed'\n idict['Name'] = tag['Value']\n for key in key_filter:\n try:\n if key in ['AvailabilityZone','Tenancy']:\n idict[key] = instance['Placement'][key]\n elif key == 'SecurityGroups':\n sg_list = []\n for sg in instance[key]:\n sg_list.append(sg['GroupId'])\n if self.args['output'] == 'csv':\n sg_string = \" \\n\"\n idict[key] = sg_string.join(sg_list)\n else:\n idict[key] = ','.join(sg_list)\n elif key == 'BlockDeviceMappings':\n devices = []\n for dev in instance[key]:\n devices.append(dev['DeviceName'])\n if self.args['output'] == 'csv':\n dev_string = \" \\n\"\n idict[key] = dev_string.join(devices)\n else:\n idict[key] = ','.join(devices)\n elif key == 'State':\n idict[key] = instance[key]['Name']\n else:\n if instance[key]:\n idict[key] = instance[key]\n except Exception as e:\n idict[key] = 'N/A'\n ilist.append(idict)\n self.template(self.sortList(ilist))", "def inventory(env):\n envs = environments()\n check_env(env, envs)\n headers, fact_names = inventory_facts()\n\n return render_template(\n 'inventory.html',\n envs=envs,\n current_env=env,\n fact_headers=headers)", "def get_availability(self, schedules, start, end, interval=60):\n url = self.build_url(self._endpoints.get('get_availability'))\n\n data = {\n 'startTime': self._build_date_time_time_zone(start),\n 'endTime': self._build_date_time_time_zone(end),\n 'availabilityViewInterval': interval,\n 'schedules': schedules\n }\n\n response = self.con.post(url, data=data)\n if not response:\n return []\n\n data = response.json().get('value', [])\n\n # transform dates and availabilityView\n availability_view_codes = {\n '0': 'free',\n '1': 'tentative',\n '2': 'busy',\n '3': 'out of office',\n '4': 'working elsewhere',\n }\n for schedule in data:\n a_view = schedule.get('availabilityView', '')\n schedule['availabilityView'] = [availability_view_codes.get(code, 'unkknown') for code in a_view]\n for item in schedule.get('scheduleItems', []):\n item['start'] = self._parse_date_time_time_zone(item.get('start'))\n item['end'] = self._parse_date_time_time_zone(item.get('end'))\n\n return data", "def daily_values(self) -> List[RecipeObjectNutrientsCalories]:\n return self._daily_values", "def list_dates(product):\n\n if product == 'analysis_assim':\n files = _list_files(product)\n dates = []\n for f in files:\n date = _date_from_filename(f)\n dates.append(date)\n dates = list(set(dates)) # Get unique dates\n else:\n template = (HS_DATA_EXPLORER_URI + 'files_explorer/get-folder-contents'\n '/?selection_path=%2Fprojects%2Fwater%2Fnwm%2Fdata%2F{0}'\n '%3Ffolder&query_type=filesystem')\n if 'long_range' in product:\n product = 'long_range'\n uri = template.format(product)\n response = urlopen(uri).read()\n dates = re.findall(r'\\>([0-9]+)\\<', response)\n return sorted(dates)", "def _getdata(self, data):\n lines = []\n start_date = str(data['form']['start_date'])\n end_date = str(data['form']['end_date'])\n department_ids = data['form']['department_ids']\n\n vehicles_ids = self.pool.get('fleet.vehicle').search(self.cr, self.uid,\\\n [('department_id', 'in', department_ids)], context=self.context)\n\n fuel_qty_line_obj = self.pool.get('fuel.qty.line')\n\n sdate = datetime.strptime(start_date, \"%Y-%m-%d\")\n syear = 
sdate.year\n smonth = sdate.month\n edate = datetime.strptime(end_date, \"%Y-%m-%d\")\n eyear = edate.year\n emonth = edate.month\n\n fuel_qty_line_ids = fuel_qty_line_obj.search(self.cr, self.uid,\\\n [('vehicles_id', 'in', vehicles_ids)], context=self.context)\n\n\n\n counter = 1\n for qty_line in fuel_qty_line_obj.browse(self.cr, self.uid, \\\n fuel_qty_line_ids, context=self.context):\n current_m = int(qty_line.month)\n current_y = int(qty_line.year)\n start = current_m >= smonth and current_y >= syear\n end = current_m <= emonth and current_y <= eyear\n if start and end:\n line = {'type':str(counter)+\" : \"+\\\n qty_line.vehicles_id.type.name}\n line['vehicle_no'] = qty_line.vehicles_id.vin_sn\n line['spent'] = qty_line.spent_qty\n line['counter_no'] = str(qty_line.vehicles_id.odometer)+\" \"+\\\n qty_line.vehicles_id.odometer_unit\n line['date'] = qty_line.month+\"/\"+qty_line.year\n lines.append(line)\n counter += 1\n return lines", "def get_day_range(self, order_date, reg_date):\n\t\tdays = (order_date - reg_date).days\n\t\tfor day_range in self.day_ranges:\n\t\t\tif days >= day_range[0] and days <= day_range[1]:\n\t\t\t\treturn day_range\n\t\treturn []", "def get_calendar_items(self, take=5):\n url = 'https://www.rova.nl/api/waste-calendar/upcoming'\n # request data from rova API and save response first 5 items (default)\n response = requests.get(url, params={\n 'postalcode': self.zip_code,\n 'houseNumber': self.house_number,\n 'addition': self.house_addition,\n 'take': take,\n })\n\n response.raise_for_status()\n\n rova_response = response.json()\n\n items = []\n types = []\n # add next pickup date for each garbage type\n for item in rova_response:\n date = datetime.strptime(item[\"date\"], \"%Y-%m-%dT%H:%M:%SZ\")\n date = date.strftime(\"%Y-%m-%dT%H:%M:%S\")\n garbage_type = item[\"garbageTypeCode\"].upper()\n\n items.append({\n 'GarbageTypeCode': garbage_type,\n 'Date': date\n })\n types.append(garbage_type)\n return items", "def list_availability(self, metric_id, **kwargs):\n prefix_id = \"availability/{}\".format(urlquote(metric_id, safe=''))\n return self._list_data(prefix_id=prefix_id, **kwargs)", "def generate_date_range_rows(self):\n logging.info(\"Starting method to get the table headers\")\n\n # Variable\n counter = 0\n collector = []\n n_days = (self.to_date - self.from_date).days\n\n # Increment in a while loop to generate the table heading ...\n while counter != n_days + 1:\n collect_date = self.from_date + timedelta(days=counter)\n collector.append(\n collect_date.strftime(\"%a\") + ', ' + collect_date.strftime('%Y/%m/%d')\n )\n counter += 1\n\n # Insert the from date and till date on the row to populate the date field\n self.roster_rows['today_date_roster_format'] = datetime.strftime(self.today, '%a, %Y/%m/%d')\n self.roster_rows['from_date'] = datetime.strftime(self.from_date, '%Y/%m/%d')\n self.roster_rows['to_date'] = datetime.strftime(self.to_date, '%Y/%m/%d')\n self.roster_rows['today_date'] = datetime.strftime(self.today, '%Y/%m/%d')\n\n logging.info(\"Finished collecting table headers\")\n return collector", "def slice(self, start_date, end_date = None):\n\n if end_date is None:\n end_date = self.series.index[-1]\n self.series = self.series.loc[start_date:end_date]", "def is_car_available_in_the_selected_period(date_from, date_to, car_id):\n session = start_session()\n queryset = session.query(CarReservation).filter(CarReservation.id_car.__eq__(car_id))\n reservations_list = queryset2list(queryset)\n try:\n date_from = datetime.strptime(date_from, 
'%Y-%m-%d')\n date_to = datetime.strptime(date_to, '%Y-%m-%d')\n is_available = True\n for reservation in reservations_list:\n if dates_intervals_are_overlapped(reservation.date_from, reservation.date_to, date_from.date(), date_to.date()):\n is_available = False\n return is_available\n except ValueError:\n return False", "def get_data(self):\n data = load.loader.get_data_for_hotel(self.hotel_name, self.filter)\n\n self.data_items = []\n\n if data:\n for row in data:\n for col in row:\n self.data_items.append(col)\n self.row_number = str(self.get_row_number())\n\n self.hotel_full_data = self.hotel_name + ' ' + load.loader.get_hotel_address(self.hotel_name)", "def month_overview(items, month_long):\n events = []\n for item in items:\n dt = datetime.strptime(item['Date'], '%m/%d/%Y')\n if filters.month_l_filter(dt.month) == month_long:\n events.append(item)\n return events", "def test_list_daily_prices(self):\n from grand_exchanger.resources.graph import Graph\n\n price_history = Graph(\n daily={\n datetime(2020, 7, 26, 0, 0): 120,\n datetime(2020, 7, 25, 0, 0): 110,\n datetime(2020, 7, 27, 0, 0): 100,\n },\n average={},\n )\n\n assert list(price_history.list_daily_prices()) == [\n (datetime(2020, 7, 27, 0, 0), 100),\n (datetime(2020, 7, 26, 0, 0), 120),\n (datetime(2020, 7, 25, 0, 0), 110),\n ]", "def run(self) -> list:\n logger.debug('Fetching date %s', self._day.strftime('%Y/%m/%d'))\n \n regions = [r() for r in regions_list]\n air_quality = list()\n \n # fetch air quality of each region\n for r in regions:\n r.fetch_air_quality(self._day)\n \n # gather results from all regions\n for r in regions:\n # wait until region has fetched his data\n r.wait_for_quality()\n logging.info('Fetched region:%s for day:%s', r.name, self._day)\n air_quality.append({\n 'name': r.name,\n 'provinces': [\n {'name': x.name, 'short': x.short_name, 'quality': x.quality.asdict()} \n for x in r.provinces]\n })\n\n self._fetcher.fetched_result(self._day, air_quality)", "async def get_rent_alram_list(\n request: Request,\n user_id: int = 0,\n date_from: object = None,\n date_to: object = None,\n limit: int = 0,\n offset: int = 0) -> list:\n\n ret_val = []\n\n query_str = get_alarm_timein_query\n\n try:\n if limit > 0:\n #query_str += ' ORDER BY rnt.date_to DESC LIMIT $4 OFFSET $5;'\n async with request.app.pg.acquire() as connection:\n rows = await connection.fetch(\n query_str)\n else:\n #query_str += ' ORDER BY rnt.date_to DESC;'\n async with request.app.pg.acquire() as connection:\n rows = await connection.fetch(query_str)\n\n if rows is not None:\n ret_val = [dict(x) for x in rows]\n\n except Exception as gclerr:\n logger.error('get_rent_list service erred with: {}'.format(gclerr))\n\n return ret_val", "def menu(self, venue_id, date):\n query = \"&date=\" + date\n response = self._request(V2_ENDPOINTS['MENUS'] + venue_id + query)\n return response", "def list_all_reservations():\n session_id = request.args.get('session-id', None)\n user_id = request.args.get('user-id', None)\n reservation_filter = request.args.get('reservation-filter', None)\n reservations_list = get_all_reservations_list(reservation_filter)\n cars_reservations_list = get_cars_user_reservations_list(reservations_list)\n reservations_status_list = get_reservations_status_list(reservations_list)\n users_list_for_reservations = get_users_list_for_reservations_list(reservations_list)\n if check_authentication(session_id, user_id) and is_admin_user(user_id):\n return render_template('admin_area.html', user=user_id, session_id=session_id,\n 
reservations_list=reservations_list, cars_reservations_list=cars_reservations_list,\n reservations_status_list=reservations_status_list,\n users_list_for_reservations=users_list_for_reservations,\n reservations_list_mode=True)\n else:\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), authjs=False,\n preview_length=get_cars_preview().__len__(), del_session_cookie=True)", "def get_queryset(self):\n\t\treturn EffortInstance.objects.order_by('-date_start')[:15]", "def temp_range_stats(start, end):\n \n # Create our session (link) from Python to the DB\n session = Session(engine)\n \n dates_ = session.query(Measurement.date)\n dates = [x[0] for x in dates_]\n if start not in dates or end not in dates:\n session.close()\n return jsonify({\"error\": f\"Date {start} or {end} not found.\"}), 404\n \n else:\n results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).filter(Measurement.date <= end).all()\n \n temp_stats = [\n {\"tmin\": results[0][0]},\n {\"tavg\": results[0][1]},\n {\"tavg\": results[0][2]}\n ]\n\n session.close()\n \n return jsonify(temp_stats)", "def check_visited_places(userid: int, day_range: int):\n print(f\"Checking visited places by user {userid} in the last {day_range} days\")\n # get reservations in which user actually showed up from Reservation service\n range = datetime.now() - timedelta(days=day_range)\n range.replace(hour=0, minute=0, second=0, microsecond=0)\n\n reservations = Reservation.query.filter_by(user_id=userid).\\\n filter(Reservation.entrance_time != None).filter(Reservation.entrance_time >= range).all()\n # print(\"DB\", db)\n # also all results must be json serializable\n return [row.to_dict() for row in reservations]", "def load_stock(self):\n lines = []\n with Transaction().start(DBNAME, 1):\n stock_lines = self.Inventory.search([('state', '=', 'done'), ('location', '=', self.location.id)])\n if stock_lines:\n for i in stock_lines:\n batch = i.batch_number\n for j in i.lines:\n if j.quantity <= 0:\n continue\n dictionary = {}\n dictionary['code'] = j.product.code\n dictionary['item'] = j.product.template.name\n dictionary[\n 'category'] = j.product.template.category.name if j.product.template.category else None\n dictionary['quantity'] = Decimal(j.quantity).quantize(Decimal('0.11')).to_eng()\n dictionary['batch_number'] = batch\n dictionary['supplier'] = j.supplier.name if j.supplier else None\n dictionary['expiry_date'] = j.expiry_date.strftime('%d-%m-%Y') if j.expiry_date else None\n lines.append(dictionary)\n return lines", "def reservation_data(self):\n reservations = []\n\n for reservation in self.reservations():\n resource = utils.get_resource_by_uuid(reservation.resource)\n\n if resource is None:\n log.warn('Invalid UUID %s' % str(reservation.resource))\n continue\n\n resource = resource.getObject()\n\n data = {}\n\n data['title'] = utils.get_resource_title(resource)\n\n timespans = []\n for start, end in reservation.timespans():\n timespans.append(u'◆ ' + utils.display_date(start, end))\n\n data['time'] = '<br />'.join(timespans)\n data['quota'] = utils.get_reservation_quota_statement(\n reservation.quota\n ) if reservation.quota > 1 else u''\n\n data['url'] = resource.absolute_url()\n data['remove-url'] = ''.join((\n resource.absolute_url(),\n '/your-reservations?remove=',\n reservation.token.hex\n ))\n reservations.append(data)\n\n return reservations", "def test_api_can_search_employee_by_between_dates(self):\n 
res = self.client().get(service_url_emp+'/search_between/2013-10-24,2014-10-24')\n self.assertEqual(res.status_code, 200)\n self.assertIn('name1', str(res.data))\n self.assertIn('name2', str(res.data))", "def active_roast():\n c = mongo.db[app.config['INVENTORY_COLLECTION']]\n items = c.find({'user': current_user.get_id()})\n output = list()\n for x in items:\n x['id'] = str(x['_id'])\n if int(x['stock']) < 100:\n continue\n if app.config['SIMULATE_ROAST'] and x['label'] != 'Test Beans':\n continue\n output.append(x)\n output.sort(key=lambda x: x['datetime'], reverse=True)\n return render_template('roast.html', inventory=output)", "def get_events(self, start_date: datetime, end_date: datetime):\n\n events = []\n # Iterate through all events over the given\n for event_string in self._calendar.date_search(start_date, end_date):\n events.append(Event(event_string))\n return events", "def enumerate_appointments(age, gender, nb=2, price=60.):", "def list(self, **params):\n\n _, _, vouchers = self.http_client.get(\"/vouchers\", params=params)\n return vouchers", "def filter_data_by_date(df, ticker, start_date, end_date):\n if start_date is None:\n start_date = MIN_DATE\n\n if end_date is None:\n end_date = MAX_DATE\n\n filtered = df[\n (df[\"ticker\"] == ticker) & (df[\"date\"] >= start_date) & (df[\"date\"] <= end_date)\n ]\n return filtered", "async def get_rent_list(\n request: Request,\n user_id: int = 0,\n date_from: object = None,\n date_to: object = None,\n limit: int = 0,\n offset: int = 0) -> list:\n\n ret_val = []\n\n query_str = get_rent_list_query\n\n try:\n if limit > 0:\n query_str += ' ORDER BY rnt.date_to DESC LIMIT $4 OFFSET $5;'\n async with request.app.pg.acquire() as connection:\n rows = await connection.fetch(\n query_str, user_id, date_from, date_to, limit, offset)\n else:\n query_str += ' ORDER BY rnt.date_to DESC;'\n async with request.app.pg.acquire() as connection:\n rows = await connection.fetch(query_str, user_id, date_from, date_to)\n\n if rows is not None:\n ret_val = [dict(x) for x in rows]\n\n except Exception as gclerr:\n logger.error('get_rent_list service erred with: {}'.format(gclerr))\n\n return ret_val", "def test_date_range(self):\n\n url = '/%s/job-types/status/?started=%s&ended=%s' % ( self.api,\n '2015-01-01T00:00:00Z',\n '2015-01-02T00:00:00Z')\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 4)", "def getStockList(storeExcel=False, path=None):\n import time\n\n start = time.time()\n stockList = Custom().ScreenerView(columns=[0,1,2,3,4,5,6,7,8,25,30,65,66,67])\n end = time.time()\n\n print('Took {0} Min and {1} Seconds to Query'.format((end - start)//60, (end-start)%60))\n\n if storeExcel:\n stockList.to_excel(path)\n\n return stockList", "def inventoryByYear(self):\n\n\t\tcurrentYear = date.today().year\n\t\tinventory = {}\n\n\t\tfor bottle in self.bottles:\n\t\t\tif bottle.consumption == None:\n\t\t\t\tholdYear = max(currentYear, bottle.hold_until)\n\t\t\t\tif holdYear not in inventory:\n\t\t\t\t\tinventory[holdYear] = 1\n\n\t\t\t\telse:\n\t\t\t\t\tinventory[holdYear] += 1\n\n\t\treturn inventory", "def start_end_date(start_date=dt.date.today().strftime('%Y-%m-%d'),end_date = dt.date.today().strftime('%Y-%m-%d')):\n\n sel =[func.min(Measurement.tobs),\n func.avg(Measurement.tobs),\n func.max(Measurement.tobs)\n ]\n \n result = session.query(*sel).filter((Measurement.date >= start_date) & 
(Measurement.date <= end_date)).all()\n \n temperature_values = {}\n calculated_values = []\n \n temperature_values[\"min_temp\"] = result[0][0]\n temperature_values[\"avg_temp\"] = result[0][1]\n temperature_values[\"max_temp\"] = result[0][2]\n calculated_values.append(temperature_values)\n \n return jsonify(calculated_values)", "def fetch_mopub_report(start_date, end_date, mopub_inventory_report_id, \r\n mopub_api_key):\r\n start_date = datetime.datetime.fromisoformat(start_date)\r\n end_date = datetime.datetime.fromisoformat(end_date)\r\n\r\n date_range = end_date - start_date\r\n\r\n days = date_range.days\r\n\r\n single_day = datetime.timedelta(days=1)\r\n d = start_date\r\n date_string_container = []\r\n for num in range(days+1):\r\n d_string = d.strftime('%Y-%m-%d')\r\n # print(d_string)\r\n date_string_container.append(d_string)\r\n d += single_day\r\n \r\n df_container = []\r\n \r\n for date in date_string_container:\r\n print(\"Fetching MoPub data for {}...\".format(date))\r\n csv_url = 'https://app.mopub.com/reports/custom/api/download_report?report_key={}&api_key={}&date={}'.format(mopub_inventory_report_id, mopub_api_key, date)\r\n df = pd.read_csv(csv_url)\r\n df_container.append(df)\r\n \r\n df_concat = pd.concat(df_container, axis=0)\r\n \r\n return df_concat", "def get_all_ingredients(self) -> List[str]:\n return [ingredient for ingredient in self.inventory_availability]" ]
[ "0.5880754", "0.55358046", "0.55146176", "0.54974526", "0.54716426", "0.54641354", "0.5431443", "0.5413283", "0.5400029", "0.53998613", "0.5383372", "0.5372556", "0.5365455", "0.5363467", "0.5363467", "0.5358362", "0.5348541", "0.53345364", "0.5328369", "0.53199214", "0.530684", "0.5281139", "0.5279798", "0.52777106", "0.5256574", "0.51891476", "0.5188826", "0.5152989", "0.51402503", "0.5125461", "0.5109149", "0.5088914", "0.5084773", "0.5080048", "0.5067507", "0.5056569", "0.50543505", "0.50489587", "0.5040222", "0.5037118", "0.5035941", "0.50231296", "0.5019722", "0.5011389", "0.5009109", "0.50063604", "0.49898854", "0.49854282", "0.49829346", "0.49817452", "0.4974989", "0.4973225", "0.4954304", "0.4949854", "0.49494955", "0.4928186", "0.4916846", "0.49144912", "0.49140134", "0.49112487", "0.49057093", "0.4902278", "0.48934516", "0.48903596", "0.4884057", "0.4881866", "0.48786953", "0.48782346", "0.48779878", "0.4874232", "0.48732594", "0.48673895", "0.4858296", "0.4851151", "0.48452204", "0.48441744", "0.4842811", "0.4840784", "0.4832553", "0.48251152", "0.4816324", "0.48142198", "0.48096213", "0.48051304", "0.48046273", "0.4802372", "0.48016518", "0.4800299", "0.47983593", "0.47981745", "0.47971004", "0.47888324", "0.47863322", "0.4784437", "0.47839573", "0.47815692", "0.4778893", "0.47718686", "0.47695726", "0.47687995" ]
0.71391815
0
Hamming Heuristic (admissible) provides very little speedup, but it's one line and admissible
def spotlessroomba_first_heuristic(state : SpotlessRoombaState) -> float:
    # TODO a nontrivial admissible heuristic
    return len(state.dirty_locations)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def chk_hamming(data):\n pass", "def hamming_algorithm(data: bytearray):\n print(f\"data: {data}\")\n # 12345678 12345678 12345678 12345678\n if len(data) % 4 != 0:\n diff = 4 - len(data) % 4\n data += bytes(diff)\n m = len(data)\n r = 0\n chunck = 0\n i = 0\n ret_data = bytearray()\n while i < m // 4:\n chunck = struct.unpack(\"I\", data[i*4:i*4 + 4])[0]\n chunck, chunck_str = insert_redundant_bits(chunck)\n print(f\"chunck: {chunck} chunck_str:{chunck_str}\")\n i += 1", "def Hamming(data):\r\n N=float(data.shape[0])\r\n temp=np.zeros(data.shape[0])\r\n for u, i in enumerate(data):\r\n temp[u]=(0.54-0.46*np.cos(2*np.pi*(u/N)))*i\r\n return temp", "def manhattan_heuristic(state):\n man_h = 0\n size = len(state)\n for i in range (size):\n for j in range (size):\n if state[i][j] == 0:\n continue\n else:\n man_h = man_h + abs(i - int(state[i][j]/3)) + abs(j - (state[i][j])%3)\n return man_h", "def apply_hamming(frames, inv=False):\n M = frames.shape[1]\n win = np.hamming(M)**(-1) if inv else np.hamming(M)\n return frames * win", "def hamming_sim(s1, s2):\n\n if s1 is None or s2 is None:\n return np.NaN\n if pd.isnull(s1) or pd.isnull(s2):\n return np.NaN\n\n # Create the similarity measure object\n measure = sm.HammingDistance()\n\n s1 = gh.convert_to_str_unicode(s1)\n s2 = gh.convert_to_str_unicode(s2)\n\n # Call the function to compute the similarity score.\n return measure.get_sim_score(s1, s2)", "def hamming(x, y):\n\n # The implem is 'symbolic', meaning that vectors can be coded not with just\n # zeros and ones, but with anything at all.\n # hamming([0, 1], [0, 1]) = 0 + 0\n # hamming([0, 1], [0, 2]) = 0 + 1\n # hamming([0, 1], [0, 5]) = 0 + 1\n # This is usefull for the monk2 dataset for ex., where some binary features\n # are coded as '1' or '2'.\n return sum(xi != yi for (xi,yi) in zip(x, y))", "def minHamm(text,pattern):\r\n D=kmersfrequency(text,len(pattern))\r\n return (min([(HammingDistance(pattern,x)) for x in D.keys()]))", "def test_loss_hamiltonian_incomplete(self):\n g = nx.lollipop_graph(4, 1).to_directed()\n edge_weight_data = {edge: (i + 1) * 0.5 for i, edge in enumerate(g.edges)}\n for k, v in edge_weight_data.items():\n g[k[0]][k[1]][\"weight\"] = v\n h = loss_hamiltonian(g)\n\n expected_ops = [\n qml.PauliZ(0),\n qml.PauliZ(1),\n qml.PauliZ(2),\n qml.PauliZ(3),\n qml.PauliZ(4),\n qml.PauliZ(5),\n qml.PauliZ(6),\n qml.PauliZ(7),\n qml.PauliZ(8),\n qml.PauliZ(9),\n qml.PauliZ(10),\n qml.PauliZ(11),\n qml.PauliZ(12),\n qml.PauliZ(13),\n ]\n expected_coeffs = [\n np.log(0.5),\n np.log(1),\n np.log(1.5),\n np.log(2),\n np.log(2.5),\n np.log(3),\n np.log(3.5),\n np.log(4),\n np.log(4.5),\n np.log(5),\n np.log(5.5),\n np.log(6),\n np.log(6.5),\n np.log(7),\n ]\n\n assert expected_coeffs == h.coeffs\n assert all([op.wires == exp.wires for op, exp in zip(h.ops, expected_ops)])\n assert all([type(op) is type(exp) for op, exp in zip(h.ops, expected_ops)])", "def hamming_dist(a_b, b_b):\n return sum(bin(a_b[n] ^ b_b[n]).count('1') for n in range(len(a_b)))", "def heuristic(state, puzzle):\n h = 0\n for i in range(puzzle.dimension):\n for j in range(puzzle.dimension):\n # (0, 0) -> 1 as value, (0, 2) -> 3 as value, etc\n value = i * puzzle.dimension + j + 1\n if value == puzzle.dimension ** 2: # value is ' '\n value = ' '\n current_position = puzzle.get_coordinates(state, value)\n goal_position = (i, j)\n h += util.manhattanDistance(current_position, goal_position)\n h /= 2\n return h", "def hamming_dist(s1, s2):\n\n if s1 is None or s2 is None:\n return np.NaN\n if pd.isnull(s1) 
or pd.isnull(s2):\n return np.NaN\n\n # Create the similarity measure object\n measure = sm.HammingDistance()\n\n s1 = gh.convert_to_str_unicode(s1)\n s2 = gh.convert_to_str_unicode(s2)\n\n\n # Call the function to compute the distance\n return measure.get_raw_score(s1, s2)", "def HammingDistance(p, q):\r\n if len(p) != len(q):\r\n return -1\r\n dist = 0\r\n #zip(AB,CD) gives (('A','C'),('B','D'))\r\n for first, second in zip(p, q):\r\n if first != second:\r\n dist = dist + 1\r\n return dist", "def hamming(s1, s2):\n assert len(s1) == len(s2)\n return sum(c1 != c2 for c1, c2 in zip(s1, s2))", "def hamming_distance(a, b):\n return np.count_nonzero(a != b)", "def misplaced_heuristic(state):\n msp_h = 0\n size = len(state)\n for i in range (size):\n for j in range (size):\n if state[i][j] == 0:\n continue\n elif state[i][j] != i*size + j:\n msp_h += 1\n return msp_h", "def hamming_distance(words: Iterator[str], vocabulary: Dict[str, int]):\n\n for word in words:\n distances = []\n suggestions = []\n vocab_list = list(vocabulary)\n for (i,vocab) in enumerate(vocab_list):\n if len(vocab) == len(word):\n distances.append(hamming(word, vocab))\n else:\n distances.append(120)\n \n idx = np.array(distances).argsort()[:5]\n \n for i in range(5):\n for j in range(i+1,5):\n if distances[idx[i]] == distances[idx[j]]:\n if vocabulary.get(vocab_list[idx[i]]) < vocabulary.get(vocab_list[idx[j]]):\n temp = idx[i] \n idx[i] = idx[j]\n idx[j] = temp \n\n for i in idx:\n suggestions.append(vocab_list[i])\n\n output(\"{misspelled}\\t{corrections}\".format(\n misspelled=word,\n corrections=\"\\t\".join(suggestions)\n )) # may cause IO bottleneck", "def Hamiltonian(self):\n return None", "def heuristicManhattan(state):\n t = state.node.getTiles()\n tArray = [t[i:i+3] for i in range(0, 9, 3)]\n heuristik = 0\n for row in range(len(tArray)):\n for col in range(len(tArray[row])):\n if tArray[row][col] == 1:\n heuristik += abs(row) + abs(col - 1)\n elif tArray[row][col] == 2:\n heuristik += abs(row) + abs(col - 2)\n elif tArray[row][col] == 3:\n heuristik += abs(row - 1) + abs(col)\n elif tArray[row][col] == 4:\n heuristik += abs(row - 1) + abs(col - 1)\n elif tArray[row][col] == 5:\n heuristik += abs(row - 1) + abs(col - 2)\n elif tArray[row][col] == 6:\n heuristik += abs(row - 2) + abs(col)\n elif tArray[row][col] == 7:\n heuristik += abs(row - 2) + abs(col - 1) \n elif tArray[row][col] == 8:\n heuristik += abs(row - 2) + abs(col - 2)\n return heuristik", "def hamdist(inp):\n\treturn sum(c1 != c2 for c1, c2 in itertools.izip(inp[0],inp[1]))", "def algorithm_h(n, m):\n partition = [1]*m\n partition[0] = n - m + 1\n\n while True:\n yield partition[:]\n if partition[1] < partition[0] - 1:\n partition[0] -= 1\n partition[1] += 1\n else:\n j = 2\n s = partition[0] + partition[1] - 1\n while j < m and partition[j] >= partition[0] - 1:\n s += partition[j]\n j += 1\n if j >= m:\n return\n replacement = partition[j] + 1\n partition[j] = replacement\n j -= 1\n while j > 0:\n partition[j] = replacement\n s -= replacement\n j -= 1\n partition[0] = s", "def hamming2(s1, s2):\n assert len(s1) == len(s2)\n return sum(c1 != c2 for c1, c2 in zip(s1, s2))", "def hamming_weight(num):\n\n return bin(num).count(\"1\");", "def edgeHam( H, Jf, repNum, repString, adjacencyList ):\n \n flipVal = -.5 * Jf\n opVal = .25 * Jf\n sameVal = -.25 * Jf\n \n # Cycle throught the adjacencies\n for (i,j) in adjacencyList:\n \n si,sj = repString[i], repString[j]\n \n if si != sj:\n # Raising / Lowering terms will flip the two and spit out -1/2 Jf\n # 
We have to do some wonky stuff because you can't do element \n # assignment with strings\n flipString = [c for c in repString]\n flipString[i] = sj\n flipString[j] = si\n flipString = ''.join( flipString )\n flipNum = int( flipString, base = 2 )\n \n H[(repNum, flipNum)] += flipVal\n \n # Opposite spins will get a 1/4 from the s_z terms\n H[(repNum, repNum)] += opVal\n \n else:\n # Same spins will get a -1/4 from the s_z terms\n H[(repNum, repNum)] += sameVal", "def test_binary_hamming_distance_differentiability(self, inputs):\n preds, target = inputs\n self.run_differentiability_test(\n preds=preds,\n target=target,\n metric_module=BinaryHammingDistance,\n metric_functional=binary_hamming_distance,\n metric_args={\"threshold\": THRESHOLD},\n )", "def compute_hamming_distance(str1, str2):\n\n mismatches = 0\n len_strs = len(str1)\n for i in range(len_strs):\n if str1[i] != str2[i]:\n mismatches = mismatches + 1\n return mismatches", "def hammingLoss(y_test, predictions):\n hammingloss = 0.0\n for i in range(y_test.shape[0]):\n aux = 0.0\n for j in range(y_test.shape[1]):\n if int(y_test[i,j]) != int(predictions[i,j]):\n aux = aux+1.0\n aux = aux/y_test.shape[1]\n hammingloss = hammingloss + aux\n \n return hammingloss/y_test.shape[0]", "def hamming_distance(input1, input2):\n if len(input1) != len(input2):\n raise ValueError('Length of input1 and input2 are not equal.')\n input1 = hex_decode(hex_encode(input1))\n input2 = hex_decode(hex_encode(input2))\n # the general strategy here is to xor the two strings together\n # and then just count the number of 1s in the output (i.e., where the\n # two strings differed).\n output = fixed_xor(input1, input2)\n distance = 0\n for byte in output:\n for i in range(8):\n bit_mask = 1 << i\n if (bit_mask & byte) == bit_mask:\n distance += 1\n return distance", "def MorseMatchingHomotopy(M, cellcomplex):\n # Semantic sugar\n def bd(chain): return cellcomplex.boundary(chain)\n def dim(chain_or_cell): return chain_or_cell.dimension()\n def isAce(cell): return dim(cell) == dim(M(cell))\n def isKing(cell): return dim(cell) > dim(M(cell))\n def isQueen(cell): return dim(cell) < dim(M(cell))\n # Compute critical cells from matching\n critical_cells = [ cell for cell in cellcomplex if isAce(cell) ]\n # Homotopy function definition\n def homotopy(chain):\n \"\"\"\n Implement the discrete Morse homotopy gamma\n \"\"\"\n # We clone the input chain to prevent unexpected alterations\n work_chain = copy.deepcopy(chain)\n\n # We create a dictionary \"priority\" which gives the rank of queens.\n # Lower priority numbers will be processed first (i.e. 
priority ranking, not priority value)\n Queens = [Q for Q in cellcomplex if isQueen(Q)]\n def AdjacentQueens(Q): return [ q for q in bd(M(Q)) if isQueen(q) and q != Q ]\n priority = { Q : rank for (rank, Q) in enumerate(TopologicalSort(Queens, AdjacentQueens)) }\n\n # We arrange the priority queue for queens.\n # We use an auxiliary set \"enqueued\" to prevent the same queen from being\n # placed in the priority queue twice.\n work_queue = PriorityQueue()\n enqueued = set()\n def enqueue(list_of_queens):\n for Q in list_of_queens:\n if Q in enqueued: continue\n enqueued.add(Q)\n work_queue.put((-priority[Q], Q))\n\n # Initialize queue with the queens in the original chain\n enqueue([ Q for Q in work_chain if isQueen(Q) ])\n\n # Make a zero chain of correct dimension to store result in\n gamma_chain = Chain(dim(chain) + 1, cellcomplex.ring())\n\n # We iteratively process the maximal queen in \"work_chain\", each time\n # adding the appropriate multiple of the boundary of its mating king in \n # order to cancel it. Doing this can add new queens, which we enqueue.\n # A theorem prevents previously processed queens from being \"new_queens\" \n # We keep track of the king chain as we go.\n while not work_queue.empty():\n (rank, Q) = work_queue.get()\n a = work_chain[Q]\n if a == 0: continue\n K = M(Q)\n bd_K = bd(K)\n b = bd_K[Q]\n c = -a/b\n gamma_chain[K] += c\n work_chain += c * bd_K\n enqueue([ q for q in bd_K if isQueen(q) and q != Q ])\n return gamma_chain\n return (critical_cells, homotopy)", "def bit_contribution_test(hash_function):\n\n model = hash_function()\n hash_list = []\n zero_str = '0' * 2048\n for i in range(1, 2049):\n for j in range(0, i):\n flip_str = zero_str[:j] + '1' + zero_str[j+1:i]\n hash_list.append(list(map(int, list(msg_to_bits.pad_msg(flip_str, i)))))\n if i % 200 == 0:\n print(i)\n\n hashed_dict = dict()\n collisions = 0\n i = 0\n for to_hash in hash_list:\n i += 1\n hash_val = model.hash(to_hash, False).tostring()\n if hash_val in hashed_dict:\n collisions += 1\n hashed_dict[hash_val] = True\n if i % 10000 == 0:\n print(i)\n\n return collisions", "def is_harshad(n):\n return n % euler.sum_digits(n) == 0", "def calc_heuristic(self, state):\n h = 0\n board = state.board.array\n\n for i in range(self._n):\n for j in range(self._n):\n\n if board[i][j] != space_rep:\n tile_as_number = board[i][j]\n correct_x = (tile_as_number - 1) // self._n\n correct_y = (tile_as_number - 1) % self._n\n else:\n continue\n h += calc_diffs(i, j, correct_x, correct_y)\n return h", "def hamming_dist(v1, v2):\r\n edits = (v1 != v2)\r\n return edits.sum()", "def pre_compute_hashes(s, M1, M2, X):\n n = len(s)\n h1 = [0 for _ in range(n+1)]\n h2 = [0 for _ in range(n+1)]\n for i in range(1, n+1):\n ch = ord(s[i-1])\n h1[i] = (X*h1[i-1] + ch) % M1\n h2[i] = (X*h2[i-1] + ch) % M2\n return h1, h2", "def hamming_ball(s, n, alphabet=['A', 'C', 'G', 'T']):\n return itertools.chain.from_iterable(hamming_circle(s, i, alphabet)\n for i in range(n + 1))", "def hamming_distance(p, q):\n result = 0\n for x, y in zip(p, q):\n if x != y:\n result += 1\n return result + abs(len(p) - len(q))", "def test_bit_driver_output(self):\n\n H = qaoa.bit_driver(range(3), 1)\n hamiltonian = qml.Hamiltonian([1, 1, 1], [qml.PauliZ(0), qml.PauliZ(1), qml.PauliZ(2)])\n\n assert decompose_hamiltonian(H) == decompose_hamiltonian(hamiltonian)", "def hamming_encode(self,parameter):\n\n bits = np.array(self.image_bits)\n code = komm.HammingCode(parameter)\n \n if (len(bits)%code.dimension > 0):\n \n bits = np.append(bits, 
[np.zeros(self.calculate_zeros_addition_Hamming(parameter),dtype = np.uint8)])\n number_of_arrays = int(len(bits)/code.dimension)\n parts_to_encode = np.reshape(bits,(number_of_arrays,-1),order ='C')\n\n encoded_parts =[]\n for i in range (0, len(parts_to_encode)):\n encoded_part = code.encode(parts_to_encode[i])\n encoded_parts.append(encoded_part)\n encoded_parts = np.array(encoded_parts)\n\n return encoded_parts\n\n elif (len(bits)%code.dimension == 0):\n number_of_arrays = int(len(bits)/code.dimension)\n parts_to_encode = np.reshape(bits,(number_of_arrays,-1),order ='C')\n\n encoded_parts =[]\n for i in range (0, len(parts_to_encode)):\n encoded_part = code.encode(parts_to_encode[i])\n encoded_parts.append(encoded_part)\n encoded_parts = np.array(encoded_parts)\n\n return encoded_parts", "def hamming(M):\n if M < 1:\n return array([])\n if M == 1:\n return ones(1,float)\n n = arange(0,M)\n return 0.54-0.46*cos(2.0*pi*n/(M-1))", "def hamming(s1, s2):\n weight = abs(len(s1)-len(s2))\n if len(s1) < len(s2):\n s1, s2 = s2, s1\n for i in range(len(s2)):\n weight += not s1[i] == s2[i]\n return weight", "def hamming_dist(bytes1, bytes2):\n if type(bytes1) == str:\n bytes1 = [ord(c) for c in str1]\n if type(bytes2) == str:\n bytes2 = [ord(c) for c in str2]\n bins = [bin(o1 ^ o2) for o1, o2 in zip(bytes1, bytes2)]\n return len([i for i in ''.join(bins) if i == '1'])", "def hamming_byte(bin1, bin2):\n\n diffs = 0\n xored = xor(bin1, bin2)\n for byte in xored:\n diffs += bin(byte).count(\"1\")\n return diffs", "def evaluate_hamming_loss(predict, truth):\n predict_max = predict.gt(0.5).long()\n\n batch_eq_num = torch.ne(predict_max, truth).long().sum().item()\n batch_num, label_num = predict_max.shape\n\n return batch_eq_num * 1.0 / (batch_num * label_num)", "def __h1(self): # _misplace_tiles\n current = [item for row in self.arr for item in row]\n # here I am counting 0 as a tile\n goal = [i for i in range(self.board_size * self.board_size)]\n h1 = sum([int(current[i] != goal[i])\n for i in range(self.board_size * self.board_size)])\n return h1", "def hamming_distance(bytes_0: bytes, bytes_1: bytes) -> int:\n assert len(bytes_0) == len(bytes_1)\n return sum(sum(bits(byte_0 ^ byte_1)) for (byte_0, byte_1) in zip(bytes_0, bytes_1))", "def nullHeuristic(state, problem=None):\n \n return 0", "def hamming_distance(h1, h2):\n b1 = bitarray.bitarray()\n b1.frombytes(h1)\n b2 = bitarray.bitarray()\n b2.frombytes(h2)\n return bitarray.bitdiff(b1, b2)", "def triHam( H, Ja, repNum, repString, adjacencyList ):\n \n flipVal = .5 * Ja\n opVal = -.25 * Ja\n sameVal = .25 * Ja\n \n for (i,j) in adjacencyList:\n si,sj = repString[i], repString[j]\n \n if si != sj:\n # Raising / Lowering terms will flip the two and spit out 1/2 Ja\n # We have to do some wonky stuff because you can't do element \n # assignment with strings\n flipString = [c for c in repString]\n flipString[i] = sj\n flipString[j] = si\n flipString = ''.join( flipString )\n flipNum = int( flipString, base = 2 )\n \n H[(repNum, flipNum)] += flipVal\n \n # Opposite spins will get a -1/4 from the s_z terms\n H[(repNum, repNum)] += opVal\n \n else:\n # Same spins will get a +1/4 from the s_z terms\n H[(repNum, repNum)] += sameVal", "def hamming_distance(s1, s2):\n assert(len(s1) == len(s2))\n return np.sum([1 if c1 != c2 else 0 for c1, c2 in zip(s1, s2)])", "def hammingDist(x, y):\n hd = 0\n for ch1, ch2 in zip(x, y):\n if ch1 != ch2:\n hd += 1\n return hd", "def nullHeuristic(state, problem=None):\r\n return 0", "def nullHeuristic(state, 
problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def nullHeuristic(state, problem=None):\n return 0", "def hamming(s1, s2):\n s1 = str(s1)\n s2 = str(s2)\n if len(s1) != len(s2):\n raise ValueError(\"Undefined for sequences of unequal length.\")\n return sum(el1 != el2 for el1, el2 in zip(s1, s2))", "def sugg(n):\n print (\"%s\\t\"*3)%(\"p\", \"m(bytes)\", \"ok\")\n for p in (0.1, 0.01, 0.001, 0.0001, 0.00001):\n m=BloomFilter.calBitLen(n,p)\n ok=BloomFilter.calHash(n,m)\n print (\"%.5f\\t\"+\"%d\\t\"*2)%(p, m/8, ok)\n for k in BloomFilter.KRange:\n rp=BloomFilter.calPFP(n,m,k)\n print (\"\\t\"*2+\"%d\\t%f\")%(k, rp)" ]
[ "0.75875574", "0.6815475", "0.68022764", "0.626976", "0.62238514", "0.6208715", "0.6082887", "0.60824615", "0.6025382", "0.60161185", "0.5995922", "0.5990126", "0.5911016", "0.5893186", "0.5888975", "0.5871593", "0.58487594", "0.58374834", "0.5815022", "0.5806226", "0.57874274", "0.5786353", "0.5768705", "0.57675695", "0.57587016", "0.5757868", "0.57395756", "0.5739036", "0.5727189", "0.5719728", "0.57092494", "0.5706338", "0.57059795", "0.5704375", "0.56991893", "0.5693814", "0.568209", "0.5677082", "0.5675999", "0.5655754", "0.5654626", "0.5653294", "0.5649533", "0.5620777", "0.5610386", "0.5609101", "0.560215", "0.5599648", "0.5589376", "0.5582736", "0.5574422", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.5564721", "0.55483454", "0.55483454", "0.55483454", "0.55483454", "0.55483454", "0.55483454", "0.55483454", "0.55483454", "0.55483454", "0.5544315", "0.5530312" ]
0.0
-1
Best Path Heuristic (consistent) (seems to be a very good heuristic)
Gives the roomba the ability to pass through walls and ignore additional cost on carpet
1. Find which dirty tile is best to start from
   For each dirty tile in state.dirty_locations
   1.1 Set it as the start node
   1.2 Use Total Manhattan Distance (third heuristic) to find route of least cost to visit every other dirty tile
   1.3 Compare with previous start tile, and keep the better start (tiebreak with roomba proximity to start tile)
2. Find roomba proximity to the best start tile
3. Add the results of steps 1 and 2
The heuristic is the sum of the distance to the best start tile and the cost from said tile
def spotlessroomba_second_heuristic(state : SpotlessRoombaState) -> float:
    # TODO a nontrivial consistent heuristic
    if not state.dirty_locations:
        return 0
    best_start = 0   # best dirty tile to start from
    best_cost = INF  # cost of the path from the above start tile
    for i in range(len(state.dirty_locations)):
        estimate_cost = 0
        lowest_cost = INF
        closest_dirty = 0
        dirty_locations = list(state.dirty_locations)
        current_pos = dirty_locations.pop(i)
        # find the shortest cost solution path from this starting tile
        while dirty_locations:
            for j in range(len(dirty_locations)):
                manhattan = abs(current_pos.row - dirty_locations[j].row) + abs(current_pos.col - dirty_locations[j].col)
                if manhattan < lowest_cost:
                    lowest_cost = manhattan
                    closest_dirty = j
            estimate_cost += lowest_cost
            current_pos = dirty_locations.pop(closest_dirty)
            lowest_cost = INF
        # if estimated path cost is cheaper than best path cost so far, replace best_cost and best_start
        if estimate_cost < best_cost:
            best_cost = estimate_cost
            best_start = i
        # if estimated path cost and best path cost so far are equal, tiebreak with proximity to start tile
        if estimate_cost == best_cost:
            current_pos = state.position
            dist_to_prev_best = abs(current_pos.row - state.dirty_locations[best_start].row) + abs(current_pos.col - state.dirty_locations[best_start].col)
            dist_to_i = abs(current_pos.row - state.dirty_locations[i].row) + abs(current_pos.col - state.dirty_locations[i].col)
            if dist_to_i < dist_to_prev_best:
                best_start = i
    current_pos = state.position
    # Calculate distance to the best start tile
    dist_to_start = abs(current_pos.row - state.dirty_locations[best_start].row) + abs(current_pos.col - state.dirty_locations[best_start].col)
    # Returned heuristic is the sum of distance to the start tile and estimated cost from said tile
    return dist_to_start + best_cost
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_heuristic(self, state):\n\n def get_manhattan_distance(coord_a, coord_b):\n \"\"\"Returns the manhattan distance between coord_a and coord_b.\"\"\"\n return abs(coord_a.x - coord_b.x) + abs(coord_a.y - coord_b.y)\n\n \n def get_num_obstacles(coord_a, coord_b):\n \"\"\"Returns the number of obstacles (wriggler segments or walls) between\n coord_a and coord_b.\n \n This function assumes that coord_b is larger (in either/both x and y)\n than coord_a.\n \"\"\"\n obstacle_count = 0\n \n for x in range(coord_a.x, coord_b.x + 1):\n for y in range(coord_a.y, coord_b.y + 1):\n coord = Coordinate(x, y)\n if coord in self.wall_coords or coord in state:\n obstacle_count += 1\n \n return obstacle_count\n\n\n head_coord = state.wriggler_list[0].get_head()\n tail_coord = state.wriggler_list[0].get_tail()\n \n head_manhattan_distance = get_manhattan_distance(head_coord, self.goal_coord)\n tail_manhattan_distance = get_manhattan_distance(tail_coord, self.goal_coord)\n \n # Calculate and return heuristic value depending on which heuristic to use\n if self.heuristic == Heuristic.MANHATTAN_DIST:\n # Return the shortest Manhattan distance of wriggler0's tail or head to the goal\n return min(head_manhattan_distance, tail_manhattan_distance)\n \n else: # self.heuristic == Heuristic.NUM_OBSTACLES:\n # Return the number of obstacles between wriggler0's tail/head to the goal\n # The tail/head is selected based on which is closer to the goal\n if head_manhattan_distance <= tail_manhattan_distance:\n # The head is closer or the same distance away\n return get_num_obstacles(head_coord, self.goal_coord)\n \n else:\n # The tail is closer\n return get_num_obstacles(tail_coord, self.goal_coord)", "def spotlessroomba_third_heuristic(state : SpotlessRoombaState) -> float:\n h = 0\n current_position = state.position\n dirty_locations = list(state.dirty_locations)\n partial_heuristic = INF\n closest_dirty = 0\n\n while dirty_locations:\n for i in range(len(dirty_locations)):\n manhattan = abs(current_position.row - dirty_locations[i].row) + abs(current_position.col - dirty_locations[i].col)\n if manhattan < partial_heuristic:\n partial_heuristic = manhattan\n closest_dirty = i\n h += partial_heuristic\n current_position = dirty_locations.pop(closest_dirty)\n partial_heuristic = INF\n \n return h", "def spotlessroomba_first_heuristic(state : SpotlessRoombaState) -> float:\n # TODO a nontrivial admissible heuristic\n return len(state.dirty_locations)", "def extra(maze):\n # TODO: Write your code here\n heuristic_lookup = {} \n objs = maze.getObjectives()\n corner_list = maze.getObjectives()\n start = maze.getStart()\n path = []\n dim = maze.getDimensions()\n visited = {}\n lookup_table = {}\n p_queue = []\n edgeset = []\n mintree = {}\n start_heuristic = 0 + multi_dot_heuristic_query(maze, start, objs, edgeset, mintree) * 2\n heuristic_lookup[(start, tuple(objs))] = start_heuristic\n start_state = state(start, corner_list)\n lookup_table[state(start, corner_list)] = (start_heuristic, 0, state((-2, -2)))\n p_queue.append((start_heuristic, state(start, corner_list)))\n while p_queue:\n pair = p_queue.pop(0)\n visited[pair[1]] = lookup_table.get(pair[1])[2]\n if not pair[1].getlist():\n current_state = pair[1]\n while current_state != start_state:\n path.append(current_state.getpos())\n current_state = visited.get(current_state)\n path.append(start)\n path.reverse()\n return path\n else: \n list_of_neighbors = maze.getNeighbors(pair[1].getpos()[0], pair[1].getpos()[1])\n for coordinates in list_of_neighbors:\n 
current_state = state(coordinates)\n if coordinates in pair[1].getlist():\n new_list = copy.copy(pair[1].getlist())\n new_list.remove(coordinates)\n current_state = state(coordinates, new_list)\n else:\n current_state = state(coordinates, pair[1].getlist()) \n if current_state in visited:\n continue\n if current_state in lookup_table:\n if (lookup_table.get(current_state)[0], current_state) in p_queue:\n cost = lookup_table.get(pair[1])[1] + 1\n queried_heuristic = 0\n if (current_state.getpos(), tuple(current_state.getlist())) in heuristic_lookup:\n queried_heuristic = heuristic_lookup.get((current_state.getpos(), tuple(current_state.getlist())))\n else:\n queried_heuristic = multi_dot_heuristic_query(maze, current_state.getpos(), current_state.getlist(), edgeset, mintree) * 2\n heuristic_lookup[(current_state.getpos(), tuple(current_state.getlist()))] = queried_heuristic\n heuristic = queried_heuristic + cost\n old_heuristic = lookup_table.get(current_state)[0]\n if heuristic < lookup_table.get(current_state)[0]:\n lookup_table[current_state] = (heuristic, cost, pair[1])\n p_queue.remove((old_heuristic, current_state))\n bisect.insort(p_queue, (heuristic, current_state))\n else:\n cost = lookup_table.get(pair[1])[1] + 1\n queried_heuristic = 0\n if (current_state.getpos(), tuple(current_state.getlist())) in heuristic_lookup:\n queried_heuristic = heuristic_lookup.get((current_state.getpos(), tuple(current_state.getlist()))) \n else:\n queried_heuristic = multi_dot_heuristic_query(maze, current_state.getpos(), current_state.getlist(), edgeset, mintree) * 2\n heuristic_lookup[(current_state.getpos(), tuple(current_state.getlist()))] = queried_heuristic\n heuristic = queried_heuristic + cost\n lookup_table[current_state] = (heuristic, cost, pair[1])\n bisect.insort(p_queue, (heuristic, current_state))\n\n return []", "def foodHeuristic(state, problem):\n position, foodGrid = state\n \"*** YOUR CODE HERE ***\"\n \"\"\"\n Mi heurística consiste en hacer simplemente el máximo de las distancias reales del state a cada nodo con comida\n He provado diferentes heurísticas y esta es la que me expande menos nodos, aunque no es la más óptima temporalmente\n Tardé mucho tiempo en darme cuenta de que había una función que calculaba la distancia real entre dos nodos\n NOTA: NO EJECUTAR CON LABERINTOS MÁS GRANDES QUE EL tinySearch. 
El algoritmo requiere muchísimo tiempo\n \"\"\"\n max = 0 # Inicializo el máximo en 0\n for food in foodGrid.asList(): # Esto me da cada food como un nodo (x,y), pero sólo los nodos que tengan comida\n distance = mazeDistance(position, food, problem.startingGameState) # Distancia real del state a una comida\n if max < distance: # Cálculo del máximo\n max = distance\n return max\n\n # La siguiente heurística también servía, y de hecho tardaba mucho menos, pero el autograder me daba 2/4\n # ya que se expandían más de 12.000 nodos.\n # return len(foodGrid.asList())", "def astar_multi(maze):\n heuristic_lookup = {} \n objs = maze.getObjectives()\n corner_list = maze.getObjectives()\n start = maze.getStart()\n path = []\n dim = maze.getDimensions()\n visited = {}\n lookup_table = {}\n p_queue = []\n edgeset = []\n mintree = {}\n start_heuristic = 0 + multi_dot_heuristic_query(maze, start, objs, edgeset, mintree) \n heuristic_lookup[(start, tuple(objs))] = start_heuristic\n start_state = state(start, corner_list)\n lookup_table[state(start, corner_list)] = (start_heuristic, 0, state((-2, -2)))\n p_queue.append((start_heuristic, state(start, corner_list)))\n while p_queue:\n pair = p_queue.pop(0)\n visited[pair[1]] = lookup_table.get(pair[1])[2]\n if not pair[1].getlist():\n current_state = pair[1]\n while current_state != start_state:\n path.append(current_state.getpos())\n current_state = visited.get(current_state)\n path.append(start)\n path.reverse()\n return path\n else: \n list_of_neighbors = maze.getNeighbors(pair[1].getpos()[0], pair[1].getpos()[1])\n for coordinates in list_of_neighbors:\n current_state = state(coordinates)\n if coordinates in pair[1].getlist():\n new_list = copy.copy(pair[1].getlist())\n new_list.remove(coordinates)\n current_state = state(coordinates, new_list)\n else:\n current_state = state(coordinates, pair[1].getlist()) \n if current_state in visited:\n continue\n if current_state in lookup_table:\n if (lookup_table.get(current_state)[0], current_state) in p_queue:\n cost = lookup_table.get(pair[1])[1] + 1\n queried_heuristic = 0\n if (current_state.getpos(), tuple(current_state.getlist())) in heuristic_lookup:\n queried_heuristic = heuristic_lookup.get((current_state.getpos(), tuple(current_state.getlist())))\n else:\n queried_heuristic = multi_dot_heuristic_query(maze, current_state.getpos(), current_state.getlist(), edgeset, mintree)\n heuristic_lookup[(current_state.getpos(), tuple(current_state.getlist()))] = queried_heuristic\n heuristic = queried_heuristic + cost\n old_heuristic = lookup_table.get(current_state)[0]\n if heuristic < lookup_table.get(current_state)[0]:\n lookup_table[current_state] = (heuristic, cost, pair[1])\n p_queue.remove((old_heuristic, current_state))\n bisect.insort(p_queue, (heuristic, current_state))\n else:\n cost = lookup_table.get(pair[1])[1] + 1\n queried_heuristic = 0\n if (current_state.getpos(), tuple(current_state.getlist())) in heuristic_lookup:\n queried_heuristic = heuristic_lookup.get((current_state.getpos(), tuple(current_state.getlist())))\n else:\n queried_heuristic = multi_dot_heuristic_query(maze, current_state.getpos(), current_state.getlist(), edgeset, mintree) \n heuristic_lookup[(current_state.getpos(), tuple(current_state.getlist()))] = queried_heuristic\n heuristic = queried_heuristic + cost\n lookup_table[current_state] = (heuristic, cost, pair[1])\n bisect.insort(p_queue, (heuristic, current_state))\n return []", "def foodHeuristic(state, problem):\n import itertools\n\n\n\n def manhattan(startPosition, 
targetPosition):\n xy1 = startPosition\n xy2 = targetPosition\n return abs(xy1[0] - xy2[0]) + abs(xy1[1] - xy2[1])\n\n position, foodGrid = state\n\n return len(foodGrid.asList())\n #\n # \"\"\"\n # The below algorithm is from:\n # https://stackoverflow.com/questions/9994913/pacman-what-kinds-of-heuristics-are-mainly-used\n #\n # Find real/manhattan distance between two currently furthest fruits in labyrinth - let's call that x.\n # Find real/manhattan distance from current Pacman position to the closer of previous two fruits - let's call that y.\n # Then, answer is just: x + y.\n # The interpretation of this x + y formula could be something like this:\n #\n # x - either way, you will have to travel this distance, at least at the end\n # y - while you are at the some of the two furthest fruits, it's better to collect\n # the food that is near to it so you don't have to go back\n # \"\"\"\n # maxFoodPairDistance = 0\n #\n # if len(foodGrid.asList()) >= 2:\n #\n # #calculate manhattan/real distance between each pair of food (all permutations in foodGrid) and find the maximum of them, and\n # #store the pair with max distance in maxFoodPair\n # for foodPair in itertools.permutations(foodGrid.asList(),2):\n # #foodPairDistance = mazeDistance(foodPair[0], foodPair[1], problem.startingGameState)\n # foodPairDistance = manhattan(foodPair[0], foodPair[1])\n # if foodPairDistance >= maxFoodPairDistance:\n # maxFoodPairDistance = foodPairDistance\n # maxFoodPair = foodPair\n #\n # #get the real distance between pacman and nearest food among the max distance food pair we get above. Using real distance instead\n # #of manhattan distance here just to \"reduce\" the number of nodes expand to get additional point. But that's a bit of a cheating\n # #because the mazeDistance function use of breadth First search - which itself is a search with nodes expansion not counted here\n # #minPacmanToFoodDistance = min([mazeDistance(position, foodPosition, problem.startingGameState) for foodPosition in maxFoodPair])\n # minPacmanToFoodDistance = min([manhattan(position, foodPosition) for foodPosition in maxFoodPair])\n #\n # #When only one food left, just return the real distance between pacman and food\n # elif len(foodGrid.asList()) == 1:\n # foodPosition = foodGrid.asList()[0]\n # #minPacmanToFoodDistance = mazeDistance(position, foodPosition, problem.startingGameState)\n # minPacmanToFoodDistance = manhattan(position, foodPosition)\n # else:\n # minPacmanToFoodDistance = 0\n #\n # return minPacmanToFoodDistance + maxFoodPairDistance", "def registerInitialState(self, gameState):\n\n # stuff\n self.treeDepth = 4\n self.oldFood = []\n self.lastEatenFood = None\n self.i = 0\n\n\n\n #oldFood\n self.oldFood = self.getFoodYouAreDefending(gameState)\n\n\n self.red = gameState.isOnRedTeam(self.index)\n self.distancer = distanceCalculator.Distancer(gameState.data.layout)\n\n # comment this out to forgo maze distance computation and use manhattan distances\n self.distancer.getMazeDistances()\n\n\n\n \n\n \n # FIND PATROL POINTS\n\n\n\n x = gameState.data.layout.width/2-8\n #print \"WIDTH \", x+4\n\n y1 = gameState.data.layout.height-4\n y2 = 0+4\n\n\n\n point1 = (x,y2)\n point2 = (x,y1)\n topPoints = []\n botPoints = []\n for i in range(0,6):\n xv = x+i\n if not gameState.data.layout.walls[xv][y1]:\n\n newBP = (xv, y1)\n botPoints.append(newBP)\n else:\n newBP = (xv, y1)\n #print newBP, \" in wall\"\n\n if not gameState.data.layout.walls[xv][y2]:\n newTP = (xv, y2)\n topPoints.append(newTP)\n else:\n newTP = (xv, y2)\n 
#print newTP, \" in wall\"\n\n\n\n\n\n # FIND PATROL POINTS WITH THE SHORTEST PATH\n bestTP = topPoints[0]\n bestBP = botPoints[0]\n\n bestPath = self.getMazeDistance(bestTP,bestBP)\n for tp in topPoints:\n bp = min(botPoints, key=lambda p: self.getMazeDistance(tp, p))\n tempPath = self.getMazeDistance(tp, bp)\n if (tempPath < bestPath):\n bestTP = tp\n bestBP = bp\n bestPath = tempPath\n\n #print \"THE REAL BEST POINTS: \", bestBP, \" \", bestTP, \" \", bestPath\n\n self.patrolPoints = [bestTP,bestBP]\n\n\n\n\n\n\n import __main__\n if '_display' in dir(__main__):\n self.display = __main__._display", "def astar_corner(maze):\n # TODO: Write your code here\n \"\"\"\n Plan:\n Do normal a* but then .clear visited after each new goal is found\n new h = Manhattan distance to the nearest goal and then the manhattan distance to the other goals starting from this nearest goal. \n new priority queue -- tuple (f, x&y, goals_left, \n \"\"\"\n pq = []\n visited = {}\n\n goals = maze.getObjectives()\n start = maze.getStart()\n\n tie = 1\n #\n # tuple = (f,g,h,x&y,tiebreaker, goals left, currpath, visited)\n f = min_manhattan(goals, start)\n curr = (f, 0, f, start, goals, 0, [])\n heapq.heappush(pq, curr)\n\n food = None\n while len(pq) > 0:\n curr = heapq.heappop(pq)\n #print(\"curr:\", curr)\n if curr[3] in curr[4]:\n curr[4].remove(curr[3])\n if len(curr[4]) == 0:\n #print(\"DONE\")\n #print(food)\n food = curr\n break\n neighbors = maze.getNeighbors(curr[3][0], curr[3][1])\n for n in neighbors:\n curr_goals_left = curr[4].copy()\n curr_visited = curr[6].copy()\n tie += 1\n #print(\"curr[6]: \", curr[6])\n #print(\"n: \", n)\n #print(\"curr[4]: \", curr[4])\n h2 = min_manhattan(curr[4], n)\n f2 = h2 + curr[1]\n g2 = curr[1] + 1\n\n node_new = (f2, g2, h2, n, curr_goals_left, tie, curr_visited)\n \n if node_new[3] not in visited or node_new[4] not in visited[node_new[3]][1]:\n if node_new[3] not in visited:\n visited[node_new[3]] = (node_new[3], [])\n visited[node_new[3]][1].append(node_new[4])\n node_new[6].append(curr[3])\n heapq.heappush(pq, node_new)\n\n if food is None:\n return []\n\n food[6].append(food[3])\n\n return food[6]", "def cornersHeuristic(state, problem):\n\n # Useful information.\n # corners = problem.corners # These are the corner coordinates\n # walls = problem.walls # These are the walls of the maze, as a Grid.\n\n # *** Your Code Here ***\n corners = problem.corners # These are the corner coordinates\n # walls = problem.walls # These are the walls of the maze, as a Grid.\n\n # Get unvisited corners\n successor = [False, False, False, False]\n currentPosition = state[0]\n currentStatus = state[1]\n\n # Take the manhattan distance of the nodes\n # current position and all corners tuple location\n # Iterate through all corners\n for corner in range(len(corners)):\n successor[corner] = distance.manhattan(currentPosition,\n corners[corner]) * (not currentStatus[corner]) # Ignore corners already visited\n return max(successor) # Return the max value from all calculated manhattan values of all corner", "def astar(grid, heuristic):\r\n evaluatedMap = {}\r\n unevaluatedMap = {}\r\n start = grid.getStart()\r\n goal = grid.getGoals()[0]\r\n startG = 0\r\n startH = heuristic(start,goal)\r\n currentNode = Node(start,startH,startG)\r\n unevaluatedMap[currentNode.coord] = currentNode\r\n \r\n while len(unevaluatedMap) > 0:\r\n # I tried using a PriorityQueue but because a node could end up with \r\n # an updated priority it really didn't make sense to use one and\r\n # instead had to just 
serach the dictionary each time for the smallest\r\n # priority which is the sum of g and h\r\n currentNode = min(unevaluatedMap.values(),key=lambda x:x.g + x.h)\r\n \r\n # if the current node is the goal then create the path by iterating backwards\r\n # and pushing the current node to the front of the path and then moving to the\r\n # parent node\r\n if currentNode.coord == goal:\r\n path = []\r\n while currentNode.parentNode:\r\n path.insert(0,currentNode.coord)\r\n currentNode = currentNode.parentNode\r\n path.insert(0,currentNode.coord)\r\n grid.setPath(path)\r\n return\r\n \r\n # Move the current node to the evaluated map and delete it from\r\n # the unevaluated map\r\n evaluatedMap[currentNode.coord] = currentNode\r\n del unevaluatedMap[currentNode.coord]\r\n \r\n # Mark the current node as having been visited\r\n grid.addVisited(currentNode.coord)\r\n \r\n # Get the neighbors of the current node\r\n neighbors = grid.getNeighbors(currentNode.coord)\r\n\r\n # For each neighbor check if that neighbor has alread been evaluated\r\n # if it has then skip that neighbor. If it hasn't and it isn't in the\r\n # unevaluated map add it with a high cost and heuristic.\r\n # Get the neighbor from the unevaluated map and calculate the current\r\n # cost. If the current cost is less than what existed update the neighbor\r\n # and add it back to the list otherwise skip to next neighbor\r\n for neighbor in neighbors:\r\n ncoord = (neighbor[0])\r\n if (ncoord) in evaluatedMap:\r\n continue\r\n if (ncoord) not in unevaluatedMap:\r\n node = Node(ncoord,float('inf'),float('inf'))\r\n unevaluatedMap[ncoord] = node\r\n \r\n node = unevaluatedMap[ncoord]\r\n calc_cost = currentNode.g + neighbor[1]\r\n if calc_cost >= node.g:\r\n continue\r\n \r\n node.parentNode = currentNode\r\n node.g = calc_cost\r\n node.h = heuristic(ncoord,goal)", "def lazy_a_star(agent):\n h1 = manhattan_heuristics\n h2 = search.straight_line_heursitic\n expanded_nodes.clear()\n\n # convert from numpy to regulat list, heappush has problems with numpy\n start_pos = (agent.start[0], agent.start[1])\n goal_pos = (agent.goal[0], agent.goal[1])\n current_pos = start_pos\n\n # initialization\n print(\"\\nCoordinate Configuration: (Y, X)\")\n print(\"Start State:\", start_pos)\n print(\"Goal State:\", goal_pos, \"\\n\")\n\n open_list = PQueue()\n closed_list = dict()\n root = {'loc': start_pos, 'g_val': 0,'h2_applied': False, 'h_val': h1(start_pos, goal_pos), 'parent': None}\n \n open_list.put(root, compare_lazyA)\n #push_node(open_list, root)\n closed_list[(root['loc'])] = root\n\n nodes_expanded = 0\n max_size_of_open = len(open_list.elements)\n while len(open_list.elements) > 0:\n # nodes_expanded += 1\n if len(open_list.elements) > max_size_of_open: # space complexity\n max_size_of_open = len(open_list.elements)\n\n node = open_list.get() #pop_node(open_list)\n if node['h2_applied'] == False:\n nodes_expanded += 1 # time complexity\n \n \n expanded_nodes.append(node['loc'])\n current_pos = node['loc']\n agent.current[0] = current_pos[0]\n agent.current[1] = current_pos[1]\n\n # path to goal state has been found\n if (node['loc'][0] == agent.goal[0] and node['loc'][1] == agent.goal[1]):\n print(\"SOLUTION FOUND!\")\n print(\"NODES EXPANDED:\", nodes_expanded)\n print(\"MAX SIZE OF OPEN_LIST:\", max_size_of_open)\n return get_path(node), expanded_nodes\n \n if node['h2_applied'] == False:\n if h1(node['loc'], goal_pos) < h2(node['loc'], goal_pos):\n node['h_val'] = h2(node['loc'], goal_pos)\n node['h2_applied'] = True\n open_list.put(node, 
compare_lazyA)\n else:\n \n # take movement option indices in agentBase.nextStep()...\n # map out viable indices to locations in map\n move_options = agent.nextStep()\n move_list =[]\n \n for i in range(len(move_options)):\n if move_options[i] == 1:\n move_list.append((node['loc'][0], node['loc'][1]+1))\n if move_options[i] == 2:\n move_list.append((node['loc'][0]+1, node['loc'][1]))\n if move_options[i] == 3:\n move_list.append((node['loc'][0], node['loc'][1]-1))\n if move_options[i] == 4: \n move_list.append((node['loc'][0]-1, node['loc'][1]))\n \n # end of for in loop\n \n # for valid locations, create movement child\n for move in move_list:\n child = {'loc': move,\n 'h2_applied': False,\n 'g_val': node['g_val'] + 1,\n 'h_val': h1(move, goal_pos),\n 'parent': node}\n if not (child['loc']) in closed_list: # pruning\n \n \n closed_list[(child['loc'])] = child\n #push_node(open_list, child)\n open_list.put(child, compare_lazyA)\n # end of for in loop\n\n # end of while\n return None # Failed to find solutions", "def registerInitialState(self, gameState):\r\n \r\n '''\r\n Make sure you do not delete the following line. If you would like to\r\n use Manhattan distances instead of maze distances in order to save\r\n on initialization time, please take a look at\r\n CaptureAgent.registerInitialState in captureAgents.py.\r\n '''\r\n CaptureAgent.registerInitialState(self, gameState)\r\n \r\n \r\n self.teamMates = []\r\n for mate in self.getTeam(gameState):\r\n if mate is not self.index:\r\n self.teamMates.append(mate)\r\n \r\n def getSuccessors(walls, state):\r\n successors = []\r\n for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:\r\n x,y = state\r\n dx, dy = Actions.directionToVector(action)\r\n nextx, nexty = int(x + dx), int(y + dy)\r\n if not walls[nextx][nexty]:\r\n nextState = (nextx, nexty)\r\n cost = 1\r\n successors.append( ( nextState, action, cost) )\r\n return successors\r\n \r\n \r\n \r\n class o0State:\r\n def __init__(self, pos, node = None):\r\n self.pos = pos\r\n self.node = node\r\n self.deadEndDepth = 0.0\r\n self.successors = {}\r\n self.successorsByNodePos = {}\r\n def isDeadEndNode(self):\r\n if self.node is None:\r\n return False\r\n noneDeadEndCount = 0\r\n for successor in self.successors.values():\r\n if not successor.isDeadEnd:\r\n noneDeadEndCount += 1\r\n return noneDeadEndCount is 1\r\n class o0Node:\r\n def __init__(self, pos):\r\n self.pos = pos\r\n self.isDeadEnd = False\r\n class o0Successor:\r\n def __init__(self, direction, nextPos, nextNodePos = None):\r\n self.direction = direction\r\n self.nextPos = nextPos\r\n self.nextNodePos = nextNodePos\r\n self.isDeadEnd = False\r\n\r\n class o0PathMap:\r\n def __init__(self, gameState):\r\n #print 'init pathMap'\r\n walls = gameState.getWalls()\r\n positions = walls.asList(False)\r\n self.states = {}\r\n self.nodes = {}\r\n for pos in positions:\r\n self.states[pos] = o0State(pos)\r\n for successor in getSuccessors(walls,pos):\r\n self.states[pos].successors[successor[1]] = o0Successor(successor[1],successor[0])\r\n successorCount = len(self.states[pos].successors)\r\n if successorCount is not 2:\r\n node = o0Node(pos)\r\n self.nodes[pos] = node\r\n self.states[pos].node = node\r\n \r\n def connectNode(node):\r\n for nodeSuccessor in self.states[node.pos].successors.values():\r\n if nodeSuccessor.nextNodePos is None:\r\n forwardSuccessors = [nodeSuccessor]\r\n backwardSuccessors = []\r\n previousPos = node.pos\r\n currentPos = nodeSuccessor.nextPos\r\n while currentPos not in 
self.nodes.keys():\r\n #print node.pos\r\n #print currentPos\r\n if len(self.states[currentPos].successors) is not 2:\r\n print 'not a path'\r\n for successor in self.states[currentPos].successors.values():\r\n #print successor.nextPos\r\n if successor.nextPos[0] is previousPos[0] and successor.nextPos[1] is previousPos[1]:\r\n backwardSuccessors.append(successor)\r\n else:\r\n forwardSuccessors.append(successor)\r\n previousPos = currentPos\r\n currentPos = forwardSuccessors[len(forwardSuccessors) - 1].nextPos\r\n for successor in self.states[currentPos].successors.values():\r\n if successor.nextPos is previousPos:\r\n backwardSuccessors.append(successor)\r\n \r\n for successor in forwardSuccessors:\r\n successor.nextNodePos = currentPos\r\n for successor in backwardSuccessors:\r\n successor.nextNodePos = node.pos\r\n \r\n #connectNode(self.nodes.values()[0])\r\n #connectNode(self.nodes.values()[1])\r\n #connectNode(self.nodes.values()[2])\r\n #connectNode(self.nodes.values()[3])\r\n #connectNode(self.nodes.values()[4])\r\n #connectNode(self.nodes.values()[5])\r\n \r\n for node in self.nodes.values():\r\n connectNode(node)#'''\r\n for state in self.states.values():\r\n for successor in self.states[state.pos].successors.values():\r\n self.states[state.pos].successorsByNodePos[successor.nextNodePos] = successor\r\n \r\n updatedNodes = self.nodes.values()\r\n while(len(updatedNodes) is not 0):\r\n nodePool = updatedNodes\r\n updatedNodes = []\r\n for node in nodePool:\r\n if self.states[node.pos].isDeadEndNode():\r\n self.nodes[node.pos].isDeadEnd = True\r\n for successor in self.states[node.pos].successors.values():\r\n self.states[successor.nextNodePos].successorsByNodePos[node.pos].isDeadEnd = True\r\n updatedNodes.append(self.states[successor.nextNodePos])\r\n \r\n #node.isDeadEnd = self.states[node.pos].isDeadEndNode()#'''\r\n \r\n '''\r\n for node in self.nodes.values():\r\n if self.states[node.pos].isDeadEndNode():\r\n node.isDeadEnd = True#'''\r\n \r\n deadEndNodes = {}\r\n noneDeadEndNodes = {}\r\n for node in self.nodes.values():\r\n if not node.isDeadEnd:\r\n noneDeadEndNodes[node.pos] = node\r\n else:\r\n deadEndNodes[node.pos] = node\r\n \r\n for node in deadEndNodes.values():#\r\n actions = breadthFirstSearch(AnyTargetSearchProblem(gameState,noneDeadEndNodes.keys(),node.pos))\r\n nodeConnectedTo = self.nodes[performActions(node.pos, actions)] \r\n actions = reverseActions(actions)\r\n pos = nodeConnectedTo.pos\r\n deadEndDepth = 0.0\r\n for action in actions:\r\n pos = performActions(pos,[action])\r\n deadEndDepth += 1.0\r\n self.states[pos].deadEndDepth = deadEndDepth\r\n def willDie(self, position, distance, scaredTime = 0):#distance from our agent to closest enemy\r\n deadEndDepth = self.states[position].deadEndDepth\r\n if deadEndDepth >= distance - deadEndDepth and deadEndDepth >= scaredTime:\r\n return True\r\n return False\r\n def isDeadEnd(self, position):\r\n return self.states[position].deadEndDepth >= 0.5\r\n #def getAllStatesInDeadEnd(self, anyState):\r\n \r\n\r\n global pathMap\r\n if pathMap is None:\r\n pathMap = o0PathMap(gameState)\r\n self.pathMap = pathMap\r\n targets[self.index] = None\r\n global lastEattenFoodAreDefendingPos\r\n lastEattenFoodAreDefendingPos = None \r\n global totalFood\r\n totalFood = len(self.getFood(gameState).asList())\r\n global leftFood\r\n leftFood = totalFood\r\n #self.debugDraw(pathMap.deadEndNodes.keys(),[1,0,0])\r\n #self.debugDraw(pathMap.nodes.keys(),[0,1,0])\r\n \r\n global pathMapDebugMode\r\n if pathMapDebugMode:\r\n for 
state in self.pathMap.states.values():\r\n deadEndColor = 0.3 + state.deadEndDepth * 0.1\r\n if deadEndColor>1.0:\r\n deadEndColor = 1.0\r\n if state.deadEndDepth == 0:\r\n deadEndColor = 0.0\r\n \r\n nodeColor = 0.0\r\n if state.node is not None:\r\n nodeColor = 0.5\r\n self.debugDraw(state.pos,[deadEndColor,0,0])\r\n\r\n self.curryFoodScore = 0.8\r\n \r\n \r\n \r\n global defenseWall\r\n global defensePositions\r\n if len(defenseWall) is 0:\r\n foods = self.getFoodYouAreDefending(gameState)\r\n for capsule in self.getCapsulesYouAreDefending(gameState):\r\n foods[capsule[0]][capsule[1]] = True\r\n defenseWall = actionsToPositions((0,0), aStarSearch(DefenseSearchProblem(gameState, foods, self.index),nullHeuristic))\r\n defensePositions = getPositionsNeededToDefense(gameState)\r\n global defenseWallDebugMode\r\n if defenseWallDebugMode is True:\r\n self.debugDraw(defenseWall,[0,0.5,0])\r\n self.debugDraw(defensePositions,[0.5,0,0])\r\n \r\n global agentInDeadEnd\r\n agentInDeadEnd[self.index] = False", "def search(state, goal_state):\n\n def gn(node):\n return node.gn()\n\n tiles_places = []\n for i in range(len(goal_state)):\n for j in range(len(goal_state)):\n heapq.heappush(tiles_places, (goal_state[i][j], (i, j)))\n\n def hn(node):\n cost = 0\n for i in range(len(node.state)):\n for j in range(len(node.state)):\n tile_i, tile_j = tiles_places[node.state[i][j]][1]\n if i != tile_i or j != tile_j:\n cost += abs(tile_i - i) + abs(tile_j - j)\n return cost\n\n def fn(node):\n return gn(node) + hn(node)\n\n return bfs.search(state, goal_state, fn)", "def a_star_search(problem, heuristic=null_heuristic):\r\n \"*** YOUR CODE HERE ***\"\r\n fringe = util.PriorityQueue()\r\n path = set()\r\n final = []\r\n acts = dict()\r\n state = problem.get_start_state()\r\n fringe.push(state, 0)\r\n\r\n while (True):\r\n state = fringe.pop()\r\n path.add(state)\r\n states = problem.get_successors(state)\r\n acts[state] = states[:]\r\n if problem.is_goal_state(state):\r\n break\r\n\r\n states = problem.get_successors(state)\r\n # push into fringe\r\n for stat in states:\r\n if stat[0] not in path:\r\n \"\"\"\r\n it does worse in corners problems, to work better needs heavy huristic, not worth in\r\n in corners problem expandend nodes grow expo\r\n all others are better\r\n counter = 0 # in some situation it helps, in some it doesnt\r\n #print(stat[0].pieces)\r\n for x in stat[0].pieces[0]:\r\n if x:\r\n counter += 1\r\n \"\"\"\r\n counter = 0\r\n fringe.push(stat[0], stat[2] + counter + heuristic(stat[0], problem)) # problem.get_cost_of_actions([stat[1]])\r\n\r\n while (True):\r\n\r\n for key, val in acts.items():\r\n for va in val:\r\n if va[0] == state:\r\n final.append(va[1])\r\n state = key\r\n break\r\n else:\r\n continue\r\n break\r\n if state == problem.get_start_state():\r\n break\r\n\r\n final.reverse()\r\n\r\n return final", "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n # Initialize data structures\n parent_node = {}\n path_to_node = {}\n priority_queue = util.PriorityQueue()\n\n p_c = 0.5\n h_c = 1 - p_c\n\n # Get the start node\n start_node = problem.getStartState()\n parent_node[start_node] = None\n path_to_node[start_node] = []\n priority_queue.update(start_node, 0)\n\n #goal_found = False\n\n while not priority_queue.isEmpty():\n # Get the next node\n node_to_expand = priority_queue.pop()\n # Check if goal state is reached\n if problem.isGoalState(node_to_expand):\n break\n next_nodes = problem.getSuccessors(node_to_expand)\n path_to_parent = 
path_to_node[node_to_expand]\n\n for one_node in next_nodes:\n point, move, cost = one_node\n curr_path = path_to_node[node_to_expand] + [move]\n curr_cost = problem.getCostOfActions(curr_path)\n heuristic_cost = heuristic(point, problem)\n # Check if current node already exists in the previously visited nodes\n if point in path_to_node:\n prev_cost = problem.getCostOfActions(path_to_node[point])\n if prev_cost > curr_cost:\n path_to_node[point] = curr_path\n priority_queue.update(point, curr_cost + heuristic_cost)\n \n else:\n path_to_node[point] = curr_path\n priority_queue.update(point, curr_cost + heuristic_cost)\n \n # current_cost = problem.getCostOfActions(point) * p_c + heuristic(point, problem) * h_c\n\n print(node_to_expand) \n return path_to_node[node_to_expand]\n \n# nodes_to_expand = set()\n# # get max value node in the fringe node\n# min_val = float(\"inf\")\n# for one_node in fringe_node:\n# # Compute the cost to reach a node\n# total_cost = cost_to_point[one_node] * p_c + heuristic(one_node,problem) * h_c\n# if total_cost < min_val:\n# min_val = total_cost\n# \n# for one_node in fringe_node:\n# # Compute the cost to reach a node\n# total_cost = cost_to_point[one_node] * p_c + heuristic(one_node,problem) * h_c\n# if total_cost == min_val:\n# nodes_to_expand.add(one_node)\n# fringe_node.remove(one_node)\n#\n# # Expand the fringe node \n# for one_node in nodes_to_expand:\n# path_to_parent = path_to_point[one_node]\n# for nxt_node in problem.getSuccessors(one_node):\n# pos = nxt_node[0]\n# mv = nxt_node[1]\n# # check if point already present in path to point\n# prev_cost = float(\"inf\")\n# if pos in cost_to_point:\n# prev_cost = cost_to_point[pos]\n# new_path = path_to_parent + [mv]\n# if prev_cost > problem.getCostOfActions(new_path):\n# path_to_point[pos] = new_path\n# cost_to_point[pos] = problem.getCostOfActions(new_path)\n# fringe_node.append(pos)\n#\n# # Check if destination is reached in the fringe node\n# for one_node in fringe_node:\n# if problem.isGoalState(one_node):\n# final_node = one_node\n# goal_found = True\n# break\n# \n# #print(len(fringe_node))\n# print(final_node)\n# print(path_to_point[final_node])\n# return path_to_point[final_node] \n\n util.raiseNotDefined()", "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n class Node:\n def __init__(self, state, parent, action, pathCost):\n self.state = state\n self.parent = parent\n self.action = action\n self.pathCost = pathCost\n\n def solution(self):\n path = list()\n tempNode = self\n while tempNode.state != problem.getStartState():\n path.insert(0, tempNode.action)\n tempNode = tempNode.parent\n return path\n\n def __eq__(self, other):\n if isinstance(other, Node):\n return self.state == other.state\n\n\n def childNode(successor, parent, action, stepCost):\n pathCost = parent.pathCost + stepCost\n child = Node(successor, parent, action, pathCost)\n return child\n\n initialNode = Node(problem.getStartState(), None, None, 0)\n frontier = util.PriorityQueue() #bfs uses a queue\n frontier.push(initialNode, initialNode.pathCost + heuristic(initialNode.state, problem)) #we use f(n) = pathCost + h(n) for the best solution\n explored = set()\n\n while not frontier.isEmpty() :\n nextNode = frontier.pop() #extract from the start of the queue\n if problem.isGoalState(nextNode.state):\n return nextNode.solution()\n explored.add(nextNode.state)\n for successor, action, stepCost in problem.getSuccessors(nextNode.state):\n child = childNode(successor, nextNode, action, stepCost)\n if 
child.state not in explored:\n frontier.update(child, child.pathCost + heuristic(child.state, problem))\n return []\n util.raiseNotDefined()", "def foodHeuristic(state, problem):\n\n position, foodGrid = state\n\n # *** Your Code Here ***\n if len(foodGrid.asList()) == 0: # If no food, then no need to go on\n return 0\n trackHeuristic = []\n # Manhattan dist between curr node position and all foods\n # If there is food, iterate through all available foods\n for food in foodGrid.asList():\n currentHeuristic = distance.manhattan(position, food)\n trackHeuristic.append(currentHeuristic)\n return max(trackHeuristic)", "def cornersHeuristic(state, problem):\n corners = problem.corners # These are the corner coordinates\n walls = problem.walls # These are the walls of the maze, as a Grid (game.py)\n \"*** YOUR CODE HERE ***\"\n \"\"\"\n En este ejercicio me he dado cuenta de un problema de mi definición del espacio de estados:\n - El espacio de estados consiste en tuplas ((x,y), grid), donde (x,y) es la posición en coordenadas\n y grid es la tabla de true/false.\n - El problema es que yo he pensado la tabla grid en forma de matriz matemática, de manera que los índices\n no van de acuerdo con la posición de las esquinas, sinó con los índices de una matriz.\n Para solucionar este problema sin tener que modificar todo lo anterior (dado que no me queda tiempo) lo que he\n tenido que hacer es crear una lista y añadir de forma ordenada los valores true/false, para que se corresponda\n cada uno con su esquina.\n \n Mi heurística consiste en lo siguiente:\n * Calculo la distancia desde la posición en la que me sitúo hasta todos los corners no visitados (los que aún\n tienen comida) y me quedo con la mínima de estas distancias, y con el corner que me de esa mínima.\n * Calculo la distancia desde ese corner (el mínimo de antes) hasta todos los otros posibles corners no visitados\n y de nuevo me quedo con la mínima distancia y con el corner que me da esa mínima.\n * Repito este proceso hasta que no queden corners.\n Entonces lo que hago es definir una nueva lista de corners, newListOfCorners que irá extrayendo los corners a medida\n que su distanca sea calculada. Por ejemplo, si tengo los cuatro corners con comida y estoy en una posición \n aleatoria, la lista newListOfCorners estará llena. Se calculará la distancia a cada corner y el corner que de la \n mínima será extraído de newListOfCorners. Entonces se calculará la distancia desde este corner hasta los restantes\n tres corners de newListOfCorners y el corner de esos tres que me de la mínima será extraído de la lista. 
Etc...\n \"\"\"\n\n # Ordenamos la lista de True's y False's para que vaya acorde con el orden de la lista corners:\n visitedCorners = []\n visitedCorners.append(state[1][1][0])\n visitedCorners.append(state[1][0][0])\n visitedCorners.append(state[1][1][1])\n visitedCorners.append(state[1][0][1])\n corners = list(corners) # De aquí saco una lista que contenga los corners ordenados.\n # Ahora los corners y la lista de visitedCorners contendrán la información de forma ordenada y coherente\n minimum = 9999999999999999 # Defino un mínimo muy grande para asegurarme que nunca sea superado\n total = 0 # Inicializo el total a cero\n newListOfCorners = [] # Creo una nueva lista para añadir los corners no estudiados\n for corner in corners: # Primero vamos a llenar la lista de corners con los que me interesen: los que tienen comida\n if visitedCorners[corners.index(corner)]: # Miramos que el corner tenga comida, sino pasamos\n newListOfCorners.append(corner) # Si tiene comida, lo añadimos\n minimCorner = corners[0] # Inicializo el minimCorner a un corner aleatorio para que no me de problemas más tarde\n actualState = state[0] # Lo mismo\n\n while not len(newListOfCorners) == 0: # Mientras la lista no esté vacía...\n for corner in newListOfCorners: # Cogemos un corner de la lista\n distanceToCorner = manhattanHeuristicToCorners(actualState, corner) # Calculamos dist. a corner\n if distanceToCorner < minimum: # Calculamos el mínimo\n minimum = distanceToCorner\n minimCorner = corner\n total += minimum # Y lo añadimos al total\n actualState = minimCorner # Reactualizamos cada variable para volver a empezar el bucle\n minimum = 9999999999999999999999999999999\n newListOfCorners.remove(minimCorner)\n return total", "def astar_multi(maze):\n # TODO: Write your code here\n gFunction = {}\n frontier = PriorityQueue()\n path = []\n ret = []\n MSTLengths = {}\n edges = {}\n\n objectives = maze.getObjectives()\n start = State(maze.getStart()[0], maze.getStart()[1], objectives)\n gFunction[start] = 0\n frontier.put(start) \n getEdgeWeights(maze, objectives, edges) # init edge weights for MST\n\n while not frontier.empty():\n\n currentState = frontier.get()\n currentCell = currentState.cell()\n objectivesLeft = currentState.objectives()\n\n if objectivesLeft.count(currentCell) != 0:\n objectivesLeft.remove(currentCell)\n\n # all objectives found, initialise backtrace and exit loop\n if len(objectivesLeft) == 0:\n path.clear()\n ret.clear()\n path.append(currentState)\n ret.append(currentCell)\n break\n \n # if we have already calculated MST length we can reuse value\n # else calculate MST length for this state and store it.\n length = 0\n if str(objectivesLeft) in MSTLengths:\n length = MSTLengths[str(objectivesLeft)]\n else:\n length = getMSTLength(objectivesLeft.copy(), maze, edges)\n MSTLengths[str(objectivesLeft)] = length\n\n neighbors = maze.getNeighbors(currentCell[0], currentCell[1])\n\n for i in neighbors:\n\n neighbor = State(i[0], i[1], objectivesLeft)\n gVal= gFunction[currentState] + 1\n\n if neighbor not in gFunction or gVal < gFunction[neighbor]:\n\n neighbor.setParent(currentState)\n gFunction[neighbor] = gVal\n\n hFunction = []\n for j in objectivesLeft:\n hFunction.append(abs(j[0] - i[0]) + abs(j[1] - i[1]) + length) # use MST length + manhatten distance to nearest objective as heuristic.\n\n hVal = min(hFunction)\n\n neighbor.setfFunction(gFunction[neighbor] + hVal)\n frontier.put(neighbor)\n\n # backtrace\n while path[0]!= start:\n \n currentCell = path[0]\n path.insert(0, 
currentCell.parent())\n ret.insert(0, currentCell.parent().cell())\n\n return ret", "def uniform_cost_search(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n fringe = util.PriorityQueue()\r\n path = set()\r\n final = []\r\n acts = dict()\r\n state = problem.get_start_state()\r\n fringe.push(state, 0)\r\n\r\n while (True):\r\n state = fringe.pop()\r\n path.add(state)\r\n states = problem.get_successors(state)\r\n acts[state] = states[:]\r\n if problem.is_goal_state(state):\r\n break\r\n\r\n #states = problem.get_successors(state)\r\n # push into fringe\r\n for stat in states:\r\n if stat[0] not in path:\r\n fringe.push(stat[0], stat[1].piece.get_num_tiles()) #problem.get_cost_of_actions([stat[1]])\r\n\r\n while (True):\r\n if state == problem.get_start_state():\r\n break\r\n for key, val in acts.items():\r\n for va in val:\r\n if va[0] == state:\r\n final.append(va[1])\r\n state = key\r\n break\r\n else:\r\n continue\r\n break\r\n\r\n final.reverse()\r\n\r\n return final", "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n \"\"\"\n same as UCS function, but total cost is sum of cost till now , cost to the child node and \n cost to the goal state (heuristic function)\n \"\"\"\n fringes = util.PriorityQueue()\n explored =set()\n fringes.push((problem.getStartState(),[]),0)\n\n while(not fringes.isEmpty()):\n currentNode,currDir = fringes.pop()\n if problem.isGoalState(currentNode):\n finalPath = currDir\n break\n # print \"HOraaay goal has been found === > \", currentNode\n\n if not (currentNode in explored):\n explored.add(currentNode)\n for childNode in problem.getSuccessors(currentNode):\n totalCost = (childNode[2] + heuristic(childNode[0],problem)+problem.getCostOfActions(currDir))\n fringes.push((childNode[0],currDir+[childNode[1]]),totalCost)\n\n\n return finalPath\n\n\n\n\n\n\n\n util.raiseNotDefined()", "def getMove(self, grid):\n# global prune\n# prune = 0\n def Terminal(stateTup):\n \"\"\"\n Checks if the node is a terminal node\n Returns eval(state) if it is terminal\n \"\"\"\n state = stateTup[0]\n maxDepth = self.depthLimit\n if stateTup[1] == maxDepth:\n val = self.h.get(str(state.map))\n if val == None:\n Val = Eval(state)\n self.h[str(state.map)] = Val\n return Val\n else:\n return val\n elif len(stateTup[0].getAvailableMoves()) == 0:\n val = self.h.get(str(state.map))\n if val == None:\n Val = Eval(state)\n self.h[str(state.map)] = Val\n return Val\n else:\n return val\n\n def Eval(state):\n \"\"\"\n This is the eval function which combines many heuristics and assigns\n weights to each of them\n Returns a single value\n \"\"\"\n\n# H1 = htest2(state)\n# return H1\n H2 = h1(state)*monotonic(state)\n return H2\n\n\n def h1(state):\n Max = state.getMaxTile()\n left = len(state.getAvailableCells())/16\n if state.getCellValue([0,0]) == Max:\n v = 1\n else:\n v= 0.3\n Max = Max/1024\n return Max*left*v\n\n def mono(state):\n mon = 0\n# for i in range(4):\n# row = 0\n# for j in range(3):\n# if state.map[i][j] > state.map[i][j+1]:\n# row+=1\n# if row == 4:\n# mon += 1\n# for i in range(4):\n# column = 0\n# for j in range(3):\n# if state.map[j][i] > state.map[j+1][i]:\n# column +=1\n# if column == 4:\n# mon +=1\n#\n#\n# return mon/8\n for i in range(4):\n if all(earlier >= later for earlier, later in zip(grid.map[i], grid.map[i][1:])):\n mon+=1\n\n return mon/8\n\n def monotonic(state):\n cellvals = {}\n Path1 = [(3,0),(3,1),(3,2),(3,3),(2,3),(2,2),(2,1),(2,0),\n (1,0),(1,1),(1,2),(1,3),(0,3),(0,2),(0,1),(0,0)]\n for i in Path1:\n cellvals[i] = 
state.getCellValue(i)\n mon = 0\n for i in range(4):\n if cellvals.get((i,0)) >= cellvals.get((i,1)):\n if cellvals.get((i,1)) >= cellvals.get((i,2)):\n if cellvals.get((i,2)) >= cellvals.get((i,3)):\n mon +=1\n for j in range(4):\n if cellvals.get((0,j)) >= cellvals.get((1,j)):\n if cellvals.get((1,j)) >= cellvals.get((2,j)):\n if cellvals.get((2,j)) >= cellvals.get((3,j)):\n mon+=1\n return mon/8\n\n\n\n def htest2(state):\n score1 = 0\n score2 = 0\n r = 0.5\n\n Path1 = [(3,0),(3,1),(3,2),(3,3),(2,3),(2,2),(2,1),(2,0),\n (1,0),(1,1),(1,2),(1,3),(0,3),(0,2),(0,1),(0,0)]\n Path2 = [(3,0),(2,0),(1,0),(0,0),(0,1),(1,1),(2,1),(3,1),\n (3,2),(2,2),(1,2),(0,2),(0,3),(1,3),(2,3),(3,3)]\n valDict = {}\n for n in range(16):\n valDict[Path1[n]] = state.getCellValue(Path1[n])\n for n in range(16):\n if n%3 == 0:\n self.emergency()\n cell1 = valDict.get(Path1[n])\n cell2 = valDict.get(Path2[n])\n score1 += (cell1) * (r**n)\n score2 += (cell2) * (r**n)\n return max(score1,score2)\n\n\n def Maximize(stateTup,A,B):\n \"\"\"\n Returns a tuple of state,eval(state)\n Takes in a stateTup(tuple of grid + depth of the grid), alpha,\n and beta\n \"\"\"\n self.emergency()\n t = Terminal(stateTup)\n if t != None:\n return (None, t)\n\n maxChild , maxUtility = None,-999999999\n state = stateTup[0]\n Map = self.dict.get(str(state.map))\n if Map == None:\n children = []\n for M in range(4):\n g = state.clone()\n if g.move(M):\n children.append(g)\n self.dict[str(state.map)] = children\n else:\n children = Map\n for child in children:\n childTup = (child,stateTup[1]+1)\n utility = Minimize(childTup,A,B)[1]\n if utility > maxUtility:\n maxChild , maxUtility = child , utility\n if maxUtility >= B:\n# global prune\n# prune +=1\n break\n if maxUtility > A:\n A = maxUtility\n\n return (maxChild,maxUtility)\n\n\n def Minimize(stateTup,A,B):\n \"\"\"\n Returns a tuple of state,eval(state)\n Takes in a stateTup(tuple of grid + depth of the grid), alpha,\n and beta\n \"\"\"\n self.emergency()\n t = Terminal(stateTup)\n if t != None:\n return (None, t)\n\n minChild , minUtility = None,999999999\n state = stateTup[0]\n Map= self.dict.get(str(state.map))\n if Map == None:\n cells= state.getAvailableCells()\n children = []\n tiles = [2,4]\n for i in cells:\n for j in tiles:\n g = state.clone()\n g.insertTile(i,j)\n children.append(g)\n self.dict[str(state.map)] = children\n else:\n children = Map\n for child in children:\n childTup = (child,stateTup[1]+1)\n utility = Maximize(childTup,A,B)[1]\n if utility < minUtility:\n minChild , minUtility = child , utility\n if minUtility <= A:\n# global prune\n# prune +=1\n break\n if minUtility < B:\n B = minUtility\n\n return (minChild,minUtility)\n\n\n\n def decision(grid):\n \"\"\"\n Decision function which returns the move which led to the state\n \"\"\"\n child = Maximize((grid,0),-999999999,999999999)[0]\n Child = child.map\n g = grid.clone()\n for M in range(4):\n if g.move(M):\n if g.map == Child:\n # global prune\n # global pruneLog\n # pruneLog.append(prune)\n # print(prune)\n # print(sum(pruneLog)/len(pruneLog))\n return M\n g = grid.clone()\n\n self.dict = {}\n self.h = {}\n self.prevTime = time.clock()\n self.depthLimit = 1\n self.mL = []\n self.over = False\n while self.over == False:\n self.depthLimit +=1\n try :\n self.mL.append(decision(grid))\n\n except KeyError:\n# print(self.depthLimit)\n return self.mL[-1]\n except IndexError:\n return random.randint(0,3)\n self.Alarm(time.clock())\n return self.mL[-1]", "def astar(grid, heuristic):\r\n\r\n class MapNode:\r\n def 
__init__(self, cell, cost, parent):\r\n self.cell = cell\r\n self.cost = cost\r\n self.parent = parent\r\n\r\n @functools.total_ordering\r\n class FrontierElement:\r\n def __init__(self, cell, cost, parent, estimatedCost):\r\n self.node = MapNode(cell, cost, parent)\r\n self.estimatedCost = estimatedCost\r\n def __lt__(self, other):\r\n return self.estimatedCost < other.estimatedCost\r\n def __eq__(self, other):\r\n return self.estimatedCost is other.estimatedCost\r\n\r\n frontier = PriorityQueue()\r\n visitedNodes = set()\r\n frontier.put(FrontierElement(grid.getStart(), 0, None, 0))\r\n\r\n path = []\r\n\r\n while not frontier.empty():\r\n currentElement = frontier.get()\r\n grid.addVisited(currentElement.node.cell)\r\n visitedNodes.add(currentElement.node.cell)\r\n\r\n if currentElement.node.cell in grid.getGoals():\r\n currentNode = currentElement.node\r\n while currentNode is not None:\r\n path.insert(0, currentNode.cell)\r\n currentNode = currentNode.parent\r\n break\r\n\r\n for neighbor in grid.getNeighbors(currentElement.node.cell):\r\n neighborCoord = neighbor[0]\r\n\r\n if neighborCoord in visitedNodes:\r\n continue\r\n\r\n neighborCost = neighbor[1]\r\n cheapestGoal = min(grid.getGoals(), key=lambda goal: Vector2.fromCell(neighborCoord).squaredDistanceTo(Vector2.fromCell(goal)))\r\n\r\n cost = currentElement.node.cost + neighborCost\r\n\r\n frontier.put(FrontierElement(neighborCoord, cost, currentElement.node, cost + heuristic(neighborCoord, cheapestGoal)))\r\n\r\n grid.setPath(path)", "def cornersHeuristic(state, problem):\n corners = problem.corners # These are the corner coordinates\n walls = problem.walls # These are the walls of the maze, as a Grid (game.py)\n distance = []\n for i in range(len(corners)):\n distance.append(fabs((corners[i][0] - state[0][0]) + (corners[i][1] - state[0][1])))\n \"*** YOUR CODE HERE ***\"\n return min(distance) # Default to trivial solution", "def A_Star(start, goal, final_occupancy_grid):\n x, y = np.mgrid[0:45:1, 0:42:1]\n pos = np.empty(x.shape + (2,))\n pos[:, :, 0] = x;\n pos[:, :, 1] = y\n pos = np.reshape(pos, (x.shape[0] * x.shape[1], 2))\n coords = list([(int(x[0]), int(x[1])) for x in pos])\n\n # Define the heuristic:\n # h: dictionary containing the distance to goal ignoring obstacles for all coordinates in the grid (heuristic function)\n h = np.linalg.norm(pos - goal, axis=1)\n h = dict(zip(coords, h))\n\n # Check if the start and goal are within the boundaries of the map\n for point in [start, goal]:\n\n if point[0] < 0 and point[0] >= final_occupancy_grid.shape[0]:\n raise Exception('Start node/goal node is not contained in the map')\n\n if point[1] < 0 and point[1] >= final_occupancy_grid.shape[1]:\n raise Exception('Start node/goal node is not contained in the map')\n\n # check if start and goal nodes correspond to free spaces\n if final_occupancy_grid[start[0], start[1]]:\n raise Exception('Start node is not traversable')\n\n if final_occupancy_grid[goal[0], goal[1]]:\n raise Exception('Goal node is not traversable')\n\n # get the possible movements\n movements = _get_movements_8n()\n\n # The set of visited nodes that need to be (re-)expanded, i.e. 
for which the neighbors need to be explored\n # Initially, only the start node is known.\n openSet = [start]\n\n # The set of visited nodes that no longer need to be expanded.\n closedSet = []\n\n # For node n, cameFrom[n] is the node immediately preceding it on the cheapest path from start to n currently known.\n cameFrom = dict()\n\n # For node n, gScore[n] is the cost of the cheapest path from start to n currently known.\n gScore = dict(zip(coords, [np.inf for x in range(len(coords))]))\n gScore[start] = 0\n\n # For node n, fScore[n] := gScore[n] + h(n). map with default value of Infinity\n fScore = dict(zip(coords, [np.inf for x in range(len(coords))]))\n fScore[start] = h[start]\n\n # while there are still elements to investigate\n while openSet != []:\n\n # the node in openSet having the lowest fScore[] value\n fScore_openSet = {key: val for (key, val) in fScore.items() if key in openSet}\n current = min(fScore_openSet, key=fScore_openSet.get)\n del fScore_openSet\n\n # If the goal is reached, reconstruct and return the obtained path\n if current == goal:\n return reconstruct_path(cameFrom, current)\n\n openSet.remove(current)\n closedSet.append(current)\n\n # for each neighbor of current:\n for dx, dy, deltacost in movements:\n\n neighbor = (current[0] + dx, current[1] + dy)\n\n # if the node is not in the map, skip\n if (neighbor[0] >= final_occupancy_grid.shape[0]) or (neighbor[1] >= final_occupancy_grid.shape[1]) or (\n neighbor[0] < 0) or (neighbor[1] < 0):\n continue\n\n # if the node is occupied, skip\n if (final_occupancy_grid[neighbor[0], neighbor[1]]):\n continue\n\n # if the has already been visited, skip\n if (neighbor in closedSet):\n continue\n # d(current,neighbor) is the weight of the edge from current to neighbor\n # tentative_gScore is the distance from start to the neighbor through current\n tentative_gScore = gScore[current] + deltacost\n\n if neighbor not in openSet:\n openSet.append(neighbor)\n\n if tentative_gScore < gScore[neighbor]:\n # This path to neighbor is better than any previous one. 
Record it!\n cameFrom[neighbor] = current\n gScore[neighbor] = tentative_gScore\n fScore[neighbor] = gScore[neighbor] + h[neighbor]\n\n # Open set is empty but goal was never reached\n print(\"No path found to goal\")\n return []", "def manhattan_heuristic(state):\n man_h = 0\n size = len(state)\n for i in range (size):\n for j in range (size):\n if state[i][j] == 0:\n continue\n else:\n man_h = man_h + abs(i - int(state[i][j]/3)) + abs(j - (state[i][j])%3)\n return man_h", "def dijkstras(occupancy_map,x_spacing,y_spacing,start,goal):\n ROWS, COLS = occupancy_map.shape\n #convert physical location to index in the grid\n startNode = locToIndex(start, x_spacing, y_spacing)\n startingNodeLoc = indexToLoc(startNode, x_spacing, y_spacing)\n initialcost = math.sqrt((startingNodeLoc[0] - start[0])**2 + (startingNodeLoc[1] - start[1])**2)\n goalNode = locToIndex(goal, x_spacing, y_spacing)\n \n freelist = np.where(occupancy_map == 0)\n if occupancy_map[startNode[0], startNode[1]] != 0:\n #raise ValueError(\"start : ({}, {}) invalid, is an obstacle\".format(startNode[0], startNode[1]))\n startNode = findValidNode(startNode, start, occupancy_map, x_spacing, y_spacing)\n if occupancy_map[goalNode[0], goalNode[1]] != 0:\n #raise ValueError(\"goal: ({}, {}) invalid, is an obstacle\".format(goalNode[0], goalNode[1]))\n goalNode = findValidNode(goalNode, goal, occupancy_map, x_spacing, y_spacing)\n candidate = [ [sys.float_info.max, \n i, (freelist[0][i], freelist[1][i])] for i in range(len(freelist[0]))] \n visited = set([])\n queue = PriorityQueue(candidate)\n paths = {}\n found = False\n\n #update initial cost\n queue.remove(startNode)\n queue.insert(startNode, initialcost)\n paths[startNode] = None\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, 0, 1, queue, paths, x_spacing, y_spacing, initialcost)\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, 0, -1, queue, paths, x_spacing, y_spacing, initialcost)\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, 1, 0, queue, paths, x_spacing, y_spacing, initialcost)\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, -1, 0, queue, paths, x_spacing, y_spacing, initialcost)\n while queue.size() > 0:\n priority, current = queue.pop()\n if current == goalNode:\n found = True\n break\n #not reaching goal node yet, for each of its neighbor, update the weight\n visited.add(current)\n update(occupancy_map, ROWS, COLS, current, 0, 1, priority, queue, paths, visited, x_spacing, y_spacing)\n update(occupancy_map, ROWS, COLS, current, 0, -1, priority, queue, paths, visited, x_spacing, y_spacing)\n update(occupancy_map, ROWS, COLS, current, 1, 0, priority, queue, paths, visited, x_spacing, y_spacing)\n update(occupancy_map, ROWS, COLS, current, -1, 0, priority, queue, paths, visited, x_spacing, y_spacing)\n \n if not found:\n raise ValueError(\"fail to find shortest path\")\n node = goalNode\n shortestpath = []\n while node is not None:\n shortestpath.append(node)\n node = paths[node]\n #shortestpath.append(startNode)\n #print (startNode)\n #print ('*', list(reversed(shortestpath)))\n #print (goalNode)\n p = list(reversed([ indexToLoc(n, x_spacing, y_spacing) for n in shortestpath]))\n #start and final position may not fall on center of the grid\n if abs(p[0][0] - start[0]) > 0.0005 or abs(p[0][1] - start[1]) > 0.0005:\n p.insert(0, [start[0][0], start[1][0]])\n if abs(p[-1][0] - goal[0]) > 0.0005 or abs(p[-1][1] - goal[1]) > 0.0005:\n p.append([goal[0][0], goal[1][0]])\n res = np.array(p)\n print (res)\n return res", "def 
fast(maze):\n # TODO: Write your code here\n pq = []\n visited = {}\n\n goals = maze.getObjectives()\n goals_pq = new_pq(maze, goals, maze.getStart())\n\n f, curr_goal = heapq.heappop(goals_pq)\n heapq.heappush(pq, (f, [maze.getStart()]))\n\n while len(pq) > 0:\n curr_path = heapq.heappop(pq)[1]\n curr = curr_path[-1]\n\n if curr in visited:\n continue\n heuristic = closest(maze, curr, curr_goal)\n\n f = heuristic + len(curr_path) - 1\n visited[curr] = f\n if curr in goals:\n goals.remove(curr)\n if len(goals) == 0:\n return curr_path\n else:\n # print(\"before\")\n # print(curr_goal)\n goals_pq = new_pq(maze, goals, curr)\n f, curr_goal = heapq.heappop(goals_pq)\n # print(\"after\")\n # print(curr_goal)\n pq = []\n heapq.heappush(pq, (f, curr_path))\n visited.clear()\n continue\n for item in maze.getNeighbors(curr[0], curr[1]):\n heuristic = closest(maze, item, curr_goal)\n new_f = heuristic + len(curr_path) - 1\n if item not in visited:\n heapq.heappush(pq, (new_f, curr_path + [item]))\n else: # checks if overlap has smaller f\n if new_f < visited[item]:\n visited[item] = new_f\n heapq.heappush(pq, (new_f, curr_path + [item]))\n return []", "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n\n start = problem.getStartState()\n frontier = util.PriorityQueue() # in heap stored as ( cost,priority,location)\n frontier.push(start, 0)\n explored = []\n\n location = 0 # to remember which successor part im accessing\n action = 1\n heap_location = 2\n cost = 2\n\n history = []\n total_cost = 0 # need something to process total path cost\n\n while not frontier.isEmpty():\n\n current_position = frontier.pop()\n if problem.isGoalState(current_position):\n break\n if current_position not in explored:\n explored.append(current_position)\n else:\n continue\n\n for path in problem.getSuccessors(current_position):\n # if path[location] not in explored: # hasn't been expanded from\n if path[location] not in [item[heap_location] for item in frontier.heap]: # if not in frontier\n # print(\"valid successor (no frontier)\", each_successor[location])\n\n for entry in history:\n if entry['To'] == current_position:\n total_cost = entry['Cost']\n heuristic_cost = total_cost + heuristic(path[location], problem)\n frontier.push(path[location], path[cost] + total_cost + heuristic_cost)\n history.append({'From': current_position, 'To': path[location], 'By': path[action],\n 'Cost': total_cost + path[cost]})\n else:\n # print(\"in frontier\")\n for entry in history:\n if entry['To'] == current_position:\n total_cost = entry['Cost']\n frontier.update(path[location], total_cost + path[cost])\n # should prob add something that goes through history and wipes old entry for that point\n for entry in history:\n if entry['To'] == path[location] and entry['Cost'] > total_cost + path[cost]:\n history.remove(entry)\n history.append({'From': current_position, 'To': path[location], 'By': path[action],\n 'Cost': total_cost + path[cost]})\n break\n while not problem.isGoalState(history[-1]['To']): # loop removes last couple of movements which don't lead to goal\n history.remove(history[-1])\n\n x = len(history)\n while x - 1 != 0: # loop clears out actions that dont come from previous position\n if history[x - 1]['From'] != history[x - 2]['To']: # starts from goal and works backwards\n history.remove(history[x - 2])\n x = len(history)\n else:\n x -= 1\n\n return [path['By'] for path in history]", "def path_search(start, goal):\n if start == goal:\n return [start]\n explored = {}\n explored[start] = 2\n 
queue = [ [start, ('', 0)] ]\n bestPath = [start, ('', 1110)]\n bestPathList = []\n total = 0\n costSearchingNow = 0\n while queue:\n total += 1\n # if total>40000:\n # return -1,' fail'\n if queue[0][-1][-1] != costSearchingNow:\n \tqueue.sort(key=lambda path:path[-1][-1])\n \n path = queue.pop(0)\n costSearchingNow = path[-1][-1]\n s = path[-2]\n # print len(queue)\n # cout(path)\n # print queue\n\n if s == goal:\n bestPath = path\n # print 'Find one best path ↑'\n bestPathList.append(bestPath)\n if len(queue)==0:\n # print '~~~~',total,getString \n return total,getString(bestPathList,start,goal)\n else:\n if path[-1][-1] > bestPath[-1][-1]:\n return total,getString(bestPathList,start,goal)\n\n linenum, changetimes = path[-1]\n \n for state, actions in sh_subway[s].items():\n for action in actions:\n linechange = changetimes + 1\n if linenum != action:\n linechange += changePunishment\n path2 = path[:-1] + [action, state, (action, linechange)]\n\n if (path2[-1][-1]-len(path2)/2-1)/changePunishment <= 4:\n if len(path2)>6:\n if (path2[-2] == '上海赛车场' and path2[-4]=='嘉定新城' and path2[-6]=='马陆') or (path2[-6] == '上海赛车场' and path2[-4]=='嘉定新城' and path2[-2]=='马陆') or (path2[-2] == '龙柏新村' and path2[-4]=='龙溪路' and path2[-6]=='水城路') or (path2[-6] == '龙柏新村' and path2[-4]=='龙溪路' and path2[-2]=='水城路'):\n linechange -= changePunishment\n path2 = path[:-1] + [action, state, (action, linechange)]\n\n if path2.count(state)<=1:\n if state not in explored:\n explored[state] = linechange\n queue.append(path2)\n \n elif linechange <= explored[state]+changePunishment: # 考虑马上到终点\n \n explored[state] = linechange\n queue.append(path2)\n\n\n return total,getString(bestPathList,start,goal)", "def heuristicManhattan(state):\n t = state.node.getTiles()\n tArray = [t[i:i+3] for i in range(0, 9, 3)]\n heuristik = 0\n for row in range(len(tArray)):\n for col in range(len(tArray[row])):\n if tArray[row][col] == 1:\n heuristik += abs(row) + abs(col - 1)\n elif tArray[row][col] == 2:\n heuristik += abs(row) + abs(col - 2)\n elif tArray[row][col] == 3:\n heuristik += abs(row - 1) + abs(col)\n elif tArray[row][col] == 4:\n heuristik += abs(row - 1) + abs(col - 1)\n elif tArray[row][col] == 5:\n heuristik += abs(row - 1) + abs(col - 2)\n elif tArray[row][col] == 6:\n heuristik += abs(row - 2) + abs(col)\n elif tArray[row][col] == 7:\n heuristik += abs(row - 2) + abs(col - 1) \n elif tArray[row][col] == 8:\n heuristik += abs(row - 2) + abs(col - 2)\n return heuristik", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n start = problem.getStartState()\n frontier = util.PriorityQueue() # in heap stored as ( cost,priority,location)\n frontier.push(start, 0)\n explored = []\n\n location = 0 # to remember which successor part im accessing\n action = 1\n heap_location = 2\n cost = 2\n\n history = []\n total_cost = 0 # need something to process total path cost\n\n while not frontier.isEmpty():\n\n current_position = frontier.pop()\n if problem.isGoalState(current_position):\n break\n if current_position not in explored:\n explored.append(current_position)\n else:\n continue\n\n for path in problem.getSuccessors(current_position):\n # if path[location] not in explored: # hasen't been expanded from\n if path[location] not in [item[heap_location] for item in frontier.heap]: # if not in frontier\n # print(\"valid successor (no frontier)\", each_successor[location])\n\n for entry in history:\n if entry['To'] == current_position:\n total_cost = entry['Cost']\n\n frontier.push(path[location], path[cost] + total_cost)\n 
history.append({'From': current_position, 'To': path[location], 'By': path[action],\n 'Cost': total_cost + path[cost]})\n else:\n # print(\"in frontier\")\n for entry in history:\n if entry['To'] == current_position:\n total_cost = entry['Cost']\n frontier.update(path[location], total_cost + path[cost])\n # should prob add something that goes through history and wipes old entry for that point\n for entry in history:\n if entry['To'] == path[location] and entry['Cost'] > total_cost + path[cost]:\n # print(\"found false entry\", entry)\n history.remove(entry)\n history.append({'From': current_position, 'To': path[location], 'By': path[action],\n 'Cost': total_cost + path[cost]})\n break\n while not problem.isGoalState(history[-1]['To']): # loop removes last couple of movements which don't lead to goal\n history.remove(history[-1])\n\n x = len(history)\n while x - 1 != 0: # loop clears out actions that dont come from previous position\n if history[x - 1]['From'] != history[x - 2]['To']: # starts from goal and works backwards\n history.remove(history[x - 2])\n x = len(history)\n else:\n x -= 1\n\n return [path['By'] for path in history]", "def bfs(game, game_coords):\n # *** main queue to record steps and corresponding costs ***\n queue_moves = [[game.player.row, game.player.col]]\n cost_moves = [0]\n\n # record cost and illegal moves\n cost = 1\n declined_moves = []\n\n # record the moves in the previous turn(iteration)\n last_steps = [[game.player.row, game.player.col]]\n\n # ***** Step 1: Marking game board using cost *****\n while True:\n\n # struggled in a location, loss\n if not last_steps:\n return 0, 0, 0\n\n # collect all potential moves: left, down, right, up, teleport(if possible)\n potential_steps = []\n for step in last_steps:\n potential_steps.append(left(step))\n potential_steps.append(down(step))\n potential_steps.append(right(step))\n potential_steps.append(up(step))\n\n if search_coords(game_coords, step) in ['1', '2', '3', '4', '5', '6', '7', '8', '9']:\n potential_steps.append(step)\n\n current_steps = []\n for step in potential_steps:\n if step in declined_moves:\n continue\n elif step in queue_moves:\n # the step existed in main queue, replace it if cost is lower, otherwise skip\n if cost >= cost_moves[queue_moves.index(step)]:\n if step != queue_moves[-1]:\n continue\n\n # check if move is legal\n will_move = step\n item = search_coords(game_coords, will_move)\n\n if item == '*' or item == -1:\n declined_moves.append(will_move)\n continue\n\n elif item == 'W':\n game.player.num_water_buckets += 1\n\n for i in range(len(game_coords['W'])):\n # water picked up, set current display from 'W' to ' ' in game_coords\n if game_coords['W'][i] == will_move:\n game_coords['W'].pop(i)\n game_coords[' '].append(will_move)\n break\n\n elif item == 'F':\n if game.player.num_water_buckets < 1:\n # cannot put out fire, refuse this move :(\n declined_moves.append(will_move)\n continue\n\n game.player.num_water_buckets -= 1\n elif item in ['1', '2', '3', '4', '5', '6', '7', '8', '9']:\n for coords in game_coords[item]:\n if coords != will_move:\n will_move = coords\n break\n\n current_steps.append(will_move)\n\n # append to main queue\n queue_moves.append(will_move)\n cost_moves.append(cost)\n\n cost += 1\n\n # reach end point\n if game_coords['Y'][0] in current_steps:\n break\n\n # last_steps <- current_steps\n last_steps = []\n last_steps.extend(current_steps)\n\n cost -= 1\n\n # ***** Step 2: recall through main queue to generate a path *****\n # *** Queue: last in first out ***\n 
recall_moves = queue_moves[::-1]\n recall_cost = cost_moves[::-1]\n cursor = recall_moves[0]\n\n # generated path\n route = []\n\n # 'action to cmd' translator\n action_map = {(1, 0): 'w', (-1, 0): 's', (0, 1): 'a', (0, -1): 'd'}\n\n for i in range(len(recall_moves)):\n if recall_cost[i] == cost - 1:\n x, y = coords_sub(recall_moves[i], cursor)\n\n # simple move: left, down, right, up\n if abs(x) + abs(y) == 1:\n cursor = recall_moves[i]\n cost -= 1\n route.insert(0, action_map[(x, y)])\n\n # teleport move\n elif teleport_pair(cursor, game_coords) != -1:\n pair = teleport_pair(cursor, game_coords)\n x, y = coords_sub(recall_moves[i], pair)\n\n # teleport after simple move\n if abs(x) + abs(y) == 1:\n cursor = recall_moves[i]\n cost -= 1\n route.insert(0, action_map[(x, y)])\n\n # teleport after no move ('e')\n elif abs(x) + abs(y) == 0:\n cursor = recall_moves[i]\n cost -= 1\n route.insert(0, 'e')\n\n # convert list of paths to string\n trace = ''\n for action in route:\n trace += action + ', '\n\n return 1, cost_moves[-1], trace", "def astar(maze):\n # TODO: Write your code here\n start = maze.getStart()\n # p_queue = Queue.PriorityQueue()\n p_queue = []\n dim = maze.getDimensions()\n rows = dim[0]\n cols = dim[1]\n # backtrace.\n visited = {} \n lookup_table = {}\n for i in range (0, rows):\n for j in range (0, cols):\n visited[(i,j)] = (-1, -1)\n # heuristic, cost, prev\n lookup_table[(i, j)] = (-1, -1, (-1, -1))\n end = maze.getObjectives()[0]\n path = []\n # add startpoint to the queue.\n start_heuristic = 0 + abs(start[0] - end[0]) + abs(start[1] - end[1])\n # format: heuristic, current point so we can better sort. \n p_queue.append((start_heuristic, start))\n lookup_table[start] = (start_heuristic, 0, (-2, -2))\n while p_queue:\n pair = p_queue.pop(0)\n visited[pair[1]] = lookup_table.get(pair[1])[2]\n if pair[1] == end:\n break\n else:\n list_of_neighbors = maze.getNeighbors(pair[1][0], pair[1][1])\n for i in list_of_neighbors:\n # if i is part of path, skip i.\n if visited.get(i) != (-1, -1):\n cost = lookup_table.get(pair[1])[1] + 1\n heuristic = cost + abs(i[0] - end[0]) + abs(i[1] - end[1])\n old_heuristic = lookup_table[i][0]\n if cost < lookup_table.get(i)[1]:\n lookup_table[i] = (heuristic, cost, pair[1])\n #remove node from explored set and move it to frontier.\n visited[i] = (-1,-1)\n bisect.insort(p_queue, (heuristic, i))\n else:\n continue\n # if i is in the queue, we may check whether the new path is better.\n if (lookup_table.get(i)[0], i) in p_queue:\n cost = lookup_table.get(pair[1])[1] + 1\n heuristic = cost + abs(i[0] - end[0]) + abs(i[1] - end[1])\n old_heuristic = lookup_table[i][0]\n if cost < lookup_table.get(i)[1]:\n lookup_table[i] = (heuristic, cost, pair[1])\n # remove item by value and insert it again to the p_queue. 
\n p_queue.remove((old_heuristic, i))\n bisect.insort(p_queue, (heuristic, i))\n # if the point is not in the open_list, then we can add it to the open_list and the look_up table.\n else:\n cost = lookup_table.get(pair[1])[1] + 1\n heuristic = cost + abs(i[0] - end[0]) + abs(i[1] - end[1])\n lookup_table[i] = (heuristic, cost, pair[1])\n bisect.insort(p_queue, (heuristic, i))\n # We are done!!!\n pt = end\n while pt != start:\n path.append(pt)\n pt = visited.get(pt)\n path.append(start)\n path.reverse()\n return path", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n class Node:\n def __init__(self, state, parent, action, pathCost):\n self.state = state\n self.parent = parent\n self.action = action\n self.pathCost = pathCost\n\n def solution(self):\n path = list()\n tempNode = self\n while tempNode.state != problem.getStartState():\n path.insert(0, tempNode.action)\n tempNode = tempNode.parent\n return path\n\n def __eq__(self, other):\n if isinstance(other, Node):\n return self.state == other.state\n\n\n def childNode(successor, parent, action, stepCost):\n pathCost = parent.pathCost + stepCost\n child = Node(successor, parent, action, pathCost)\n return child\n\n initialNode = Node(problem.getStartState(), None, None, 0)\n frontier = util.PriorityQueue() #ucs uses a priority queue\n frontier.push(initialNode, initialNode.pathCost)\n explored = set()\n\n while not frontier.isEmpty() :\n nextNode = frontier.pop() #extract from the start of the queue\n if problem.isGoalState(nextNode.state):\n return nextNode.solution()\n explored.add(nextNode.state)\n for successor, action, stepCost in problem.getSuccessors(nextNode.state):\n child = childNode(successor, nextNode, action, stepCost)\n if child.state not in explored:\n frontier.update(child, child.pathCost) #we only check if state is in explored because update does the other\n return []\n util.raiseNotDefined()", "def a_star(self, xy1, xy2):\n tile_col1, tile_row1 = self.the_map.xy_to_cr(xy1[0], xy1[1])\n tile_col2, tile_row2 = self.the_map.xy_to_cr(xy2[0], xy2[1])\n \n successor_to_parent_map = {}\n start_state = (tile_col1, tile_row1)\n #print('x=%d, y=%d to col=%d, row=%d (map row=%d, col= %d)' % (xy1[0], xy1[1], tile_col1, tile_row1, \n # self.the_map.tile_speeds.shape[0], self.the_map.tile_speeds.shape[1]))\n successor_to_parent_map[(start_state, None)] = None # (Successor, Action) -> (Parent, Action)\n \n open_list = PriorityQueue()\n open_list.update((start_state, None), 0)\n closed = []\n \n while not open_list.isEmpty():\n current_state, action_to_current_state = open_list.pop()\n \n if current_state == (tile_col2, tile_row2):\n return self.__get_action_path((current_state, action_to_current_state), successor_to_parent_map)\n \n if current_state not in closed:\n if current_state == start_state:\n current_cost = 0\n else:\n current_cost = len(self.__get_action_path((current_state, action_to_current_state),\n successor_to_parent_map))\n \n for successor_state, action, step_cost in self.__get_successors(current_state):\n cost = current_cost + step_cost + self.__cartesian_distance(current_state, successor_state)\n \n open_list.update((successor_state, action), cost)\n \n if successor_state not in closed:\n successor_to_parent_map[(successor_state, action)] = (current_state, action_to_current_state)\n \n closed.append(current_state)\n return []", "def a_star(my_map, start_locs, goal_locs, h_values, agent, constraints):\n\n ##############################\n # Task 1.1: Extend the A* search to search in the space-time 
domain\n # rather than space domain, only.\n # Build constraint table if there are constraints\n\n constraint_table = build_constraint_table(constraints, agent)\n\n open_list = []\n closed_list = dict()\n nodes_opened = 0\n max_opened = 500\n start_loc = start_locs[0]\n goal_loc = goal_locs[0]\n if len(start_locs) > 1: # If there is more than 1 start location then this is a multi-cell agent\n multi = True\n else:\n multi = False\n\n # determine when the last constraint is on the goal node (or any of the goal node cells in the case of multi-cell)\n earliest_goal_timestep = 0\n if len(constraint_table) != 0:\n for time in [item for item in sorted(list(constraint_table.keys()), reverse=True)]:\n flat_list = [item for sublist in constraint_table[time] for item in sublist]\n if(goal_locs[0] in flat_list):\n earliest_goal_timestep = time\n break\n elif(multi): # if multi cell check if any of the agents goal cells are constrained \n if(goal_locs[1] in flat_list): \n earliest_goal_timestep = time\n break\n\n h_value = h_values[start_loc]\n goal_orientation = orientation(goal_locs)\n\n root = {'loc': start_loc,'orientation': orientation(start_locs), 'g_val': 0, 'h_val': h_value, 'time': 0, 'parent': None}\n push_node(open_list, root)\n closed_list[(root['loc'], root['time'], root['orientation'])] = root\n\n while len(open_list ) > 0 and nodes_opened < max_opened:\n curr = pop_node(open_list)\n nodes_opened = nodes_opened + 1\n \n if curr['loc'] == goal_loc and curr['orientation'] == goal_orientation and curr['time'] >= earliest_goal_timestep:\n return get_path(curr)\n ############################\n child_orient = curr['orientation']\n for dir in range(7):\n if dir < 5:\n child_loc = move(curr['loc'], dir)\n elif not multi: \n continue\n\n if dir == 5:\n # clockwise rotation \n child_orient = curr['orientation'] - 1\n if child_orient < 1:\n child_orient = 4\n if dir == 6:\n # counter-clockwise rotation \n child_orient = curr['orientation'] + 1\n if child_orient > 4:\n child_orient = 1\n \n if test_map(my_map, child_loc[0], child_loc[1], child_orient, dir):\n continue\n \n # check if the head location is constrained \n if is_constrained(curr['loc'], child_loc, child_orient, dir, curr['time'] + 1, constraint_table):\n continue\n\n # if this is a multi cell agent check if the tail is constrained \n if multi:\n # check the next tail location \n row_t, col_t, _, _ = find_tail_positions(curr['loc'][0], curr['loc'][1], curr['orientation'], dir)\n next_row_t, next_col_t, next_row_t_inter, next_col_t_inter = find_tail_positions(child_loc[0], child_loc[1], child_orient, dir)\n\n if is_constrained((row_t,col_t), (next_row_t, next_col_t), child_orient, dir, curr['time'] + 1, constraint_table):\n continue\n\n # if the agent is rotating check if the intermediate location is constrained\n if dir == 5 or dir == 6: \n if is_constrained((row_t,col_t), (next_row_t_inter, next_col_t_inter), child_orient, dir, curr['time'] + 1, constraint_table):\n continue\n\n child = {'loc': child_loc,\n 'orientation': child_orient,\n 'g_val': curr['g_val'] + 1,\n 'h_val': h_values[child_loc] + orient_cost(child_orient, goal_orientation),\n 'time': curr['time'] + 1,\n 'parent': curr}\n\n if (child['loc'], child['time'], child['orientation']) in closed_list:\n existing_node = closed_list[(child['loc'], child['time'], child['orientation'])]\n \n if compare_nodes(child, existing_node):\n closed_list[(child['loc'], child['time'], child['orientation'])] = child\n push_node(open_list, child)\n else:\n closed_list[(child['loc'], 
child['time'], child['orientation'])] = child\n push_node(open_list, child)\n \n return None # Failed to find solutions", "def A_Star(start, goal, final_occupancy_grid):\n x, y = np.mgrid[0:LENGTH:1, 0:WIDTH:1]\n pos = np.empty(x.shape + (2,))\n # x.shape = (LENGTH,WIDTH)\n # x.shape + (2,) = (LENGTH,WIDTH,2)\n pos[:, :, 0] = x\n pos[:, :, 1] = y\n # pos.shape = (1890, 2)\n pos = np.reshape(pos, (x.shape[0] * x.shape[1], 2))\n coords = list([(int(x[0]), int(x[1])) for x in pos])\n # Define the heuristic:\n # h: dictionary containing the distance to goal ignoring obstacles for all coordinates in the grid (heuristic function)\n h = np.linalg.norm(pos - goal, axis=1)\n # If axis is an integer, it specifies the axis of x along which to compute the vector norms\n # axis = 1: h.shape = 1890\n # axis = 0: h.shape = 2\n h = dict(zip(coords, h))\n\n # Check if the start and goal are within the boundaries of the map\n for point in [start, goal]:\n\n if point[0] < 0 and point[0] >= final_occupancy_grid.shape[0]:\n raise Exception('Start node/goal node is not contained in the map')\n\n if point[1] < 0 and point[1] >= final_occupancy_grid.shape[1]:\n raise Exception('Start node/goal node is not contained in the map')\n\n # check if start and goal nodes correspond to free spaces\n if final_occupancy_grid[start[0], start[1]]:\n raise Exception('Start node is not traversable')\n\n if final_occupancy_grid[goal[0], goal[1]]:\n raise Exception('Goal node is not traversable')\n\n # get the possible movements\n movements = get_movements_8n()\n\n # The set of visited nodes that need to be (re-)expanded, i.e. for which the neighbors need to be explored\n # Initially, only the start node is known.\n openSet = [start]\n\n # The set of visited nodes that no longer need to be expanded.\n closedSet = []\n\n # For node n, cameFrom[n] is the node immediately preceding it on the cheapest path from start to n currently known.\n cameFrom = dict()\n\n # For node n, gScore[n] is the cost of the cheapest path from start to n currently known.\n gScore = dict(zip(coords, [np.inf for x in range(len(coords))]))\n gScore[start] = 0\n\n # For node n, fScore[n] := gScore[n] + h(n). 
map with default value of Infinity\n fScore = dict(zip(coords, [np.inf for x in range(len(coords))]))\n fScore[start] = h[start]\n\n # while there are still elements to investigate\n while openSet != []:\n\n # the node in openSet having the lowest fScore[] value\n fScore_openSet = {key: val for (key, val) in fScore.items() if key in openSet}\n current = min(fScore_openSet, key=fScore_openSet.get)\n del fScore_openSet\n\n # If the goal is reached, reconstruct and return the obtained path\n if current == goal:\n # print(\"Path\", closedSet)\n return reconstruct_path(cameFrom, current)\n\n openSet.remove(current)\n closedSet.append(current)\n\n # for each neighbor of current:\n for dx, dy, deltacost in movements:\n\n neighbor = (current[0] + dx, current[1] + dy)\n\n # if the node is not in the map, skip\n if (neighbor[0] >= final_occupancy_grid.shape[0]) or (neighbor[1] >= final_occupancy_grid.shape[1]) or (\n neighbor[0] < 0) or (neighbor[1] < 0):\n continue\n\n # if the node is occupied, skip\n if (final_occupancy_grid[neighbor[0], neighbor[1]]):\n continue\n\n # if the has already been visited, skip\n if (neighbor in closedSet):\n continue\n # d(current,neighbor) is the weight of the edge from current to neighbor\n # tentative_gScore is the distance from start to the neighbor through current\n tentative_gScore = gScore[current] + deltacost\n\n if neighbor not in openSet:\n openSet.append(neighbor)\n\n if tentative_gScore < gScore[neighbor]:\n # This path to neighbor is better than any previous one. Record it!\n cameFrom[neighbor] = current\n gScore[neighbor] = tentative_gScore\n fScore[neighbor] = gScore[neighbor] + h[neighbor]\n\n # Open set is empty but goal was never reached\n print(\"No path found to goal\")\n return [], closedSet", "def astar(grid, heuristic):\r\n\r\n print (grid.getStart())\r\n frontier = PriorityQueue()\r\n frontierCpy = {}\r\n\r\n goal = grid.getGoals()[0]\r\n\r\n startX = grid.getStart()[0]\r\n startY = grid.getStart()[1]\r\n startNode = Node(((startX, startY), 0), None)\r\n\r\n init_heu = heuristic(startNode.cell[0], goal)\r\n frontierCpy[startNode.cell[0]] = init_heu\r\n frontier.put((init_heu, 0, startNode))\r\n\r\n while frontier.qsize() != 0:\r\n tup = frontier.get()\r\n\r\n currNode = tup[2]\r\n currG = tup[1] * -1\r\n grid.addVisited(currNode.cell[0])\r\n frontierCpy.pop(currNode.cell[0], None)\r\n\r\n if currNode.cell[0] == goal:\r\n path = []\r\n while currNode != None:\r\n path.insert(0, currNode.cell[0])\r\n currNode = currNode.parent\r\n grid.setPath(path)\r\n return path\r\n\r\n\r\n neighbors = grid.getNeighbors(currNode.cell[0])\r\n\r\n for n in neighbors:\r\n if n[0] not in grid.getVisited():\r\n newNode = Node(n, currNode)\r\n\r\n h = heuristic(n[0], goal)\r\n\r\n oneStepCost = n[1]\r\n g = oneStepCost + currG\r\n if n[0] not in frontierCpy or frontierCpy[n[0]] > h + g:\r\n frontier.put((h+g, -1*g, newNode))\r\n frontierCpy[n[0]] = h+g\r\n print(\"CANT FIND A PATH\")", "def Find_Path(self):\n closed_nodes_map = [] # map of closed (tried-out) nodes\n open_nodes_map = [] # map of open (not-yet-tried) nodes\n dir_map = [] # map of directions\n row = [0] * self.n\n for i in range(self.m): # create 2d arrays\n closed_nodes_map.append(list(row))\n open_nodes_map.append(list(row))\n dir_map.append(list(row))\n \n pq = [[], []] # priority queues of open (not-yet-tried) nodes\n pqi = 0 # priority queue index\n # create the start node and push into list of open nodes\n n0 = node(self.xStart, self.yStart, 0.0, 0.0)\n n0.updatePriority(self.xFinish, 
self.yFinish)\n heappush(pq[pqi], n0)\n open_nodes_map[self.yStart][self.xStart] = n0.priority # mark it on the open nodes map\n \n # A* search\n while len(pq[pqi]) > 0:\n # get the current node w/ the highest priority\n # from the list of open nodes\n n1 = pq[pqi][0] # top node\n n0 = node(n1.xPos, n1.yPos, n1.distance, n1.priority)\n x = n0.xPos\n y = n0.yPos\n heappop(pq[pqi]) # remove the node from the open list\n open_nodes_map[y][x] = 0\n # mark it on the closed nodes map\n closed_nodes_map[y][x] = 1\n \n # quit searching when the goal state is reached\n if x == self.xFinish and y == self.yFinish:\n # Generate the path from finish to start by following the \n # directions.\n return self.Reconstruct_Path(dir_map)\n \n # generate moves (child nodes) in all possible directions\n for i in range(self.num_directions):\n new_x = x + self.dx[i]\n new_y = y + self.dy[i]\n Flag=True\n if not (new_x < 0 or new_x > self.n-1 or new_y < 0 or new_y > self.m - 1\n or self.MAP[new_y][new_x] == 1 or closed_nodes_map[new_y][new_x] == 1):\n # Check to see if the extended path runs through any obstacles\n if (abs(self.dx[i])>1 or abs(self.dy[i])>1):\n # Need to check that the path does not pass an object\n JumpCells=2*max(abs(self.dx[i]),abs(self.dy[i]))-1\n for K in range(1,JumpCells):\n YPOS=int(round(K*1.0*self.dy[i]/JumpCells))\n XPOS=int(round(K*1.0*self.dx[i]/JumpCells))\n if (self.MAP[y+YPOS][x+XPOS]==1):\n Flag=False\n if Flag: \n # generate a child node\n m0 = node(new_x, new_y, n0.distance, n0.priority)\n m0.calc_cost(self.dx[i], self.dy[i])\n m0.updatePriority(self.xFinish, self.yFinish)\n # if it is not in the open list then add into that\n if open_nodes_map[new_y][new_x] == 0:\n open_nodes_map[new_y][new_x] = m0.priority\n heappush(pq[pqi], m0)\n # mark its parent node direction\n dir_map[new_y][new_x] = (self.num_directions-i-1) % self.num_directions\n elif open_nodes_map[new_y][new_x] > m0.priority:\n # update the priority info\n open_nodes_map[new_y][new_x] = m0.priority\n # update the parent direction info\n dir_map[new_y][new_x] = (self.num_directions-i-1) % self.num_directions\n # replace the node\n # by emptying one pq to the other one\n # except the node to be replaced will be ignored\n # and the new node will be pushed in instead\n while not (pq[pqi][0].xPos == new_x and pq[pqi][0].yPos == new_y):\n heappush(pq[1 - pqi], pq[pqi][0])\n heappop(pq[pqi])\n heappop(pq[pqi]) # remove the wanted node\n # empty the larger size pq to the smaller one\n if len(pq[pqi]) > len(pq[1 - pqi]):\n pqi = 1 - pqi\n while len(pq[pqi]) > 0:\n heappush(pq[1-pqi], pq[pqi][0])\n heappop(pq[pqi]) \n pqi = 1 - pqi\n heappush(pq[pqi], m0) # add the better node instead\n return '','' # no route found", "def a_star(grid, heuristic_func, start, goal):\n\n path = []\n path_cost = 0\n queue = PriorityQueue()\n queue.put((0, start))\n visited = set(start)\n\n branch = {}\n found = False\n\n while not queue.empty():\n item = queue.get()\n current_cost = item[0]\n current_node = item[1]\n\n if current_node == goal:\n print('Found a path.')\n found = True\n break\n else:\n # Get the new vertexes connected to the current vertex\n for a in valid_actions(grid, current_node):\n next_node = (current_node[0] + a.delta[0], current_node[1] + a.delta[1])\n new_cost = current_cost + a.cost + heuristic_func(next_node, goal)\n\n if next_node not in visited:\n visited.add(next_node)\n queue.put((new_cost, next_node))\n\n branch[next_node] = (new_cost, current_node, a)\n\n if found:\n # retrace steps\n n = goal\n path_cost = 
branch[n][0]\n while branch[n][1] != start:\n path.append(branch[n][1])\n n = branch[n][1]\n path.append(branch[n][1])\n\n return path[::-1], path_cost", "def a_star_obs(obs_map):\n world_ndarray = np.copy(obs_map[0])\n\n start = tuple(np.argwhere(world_ndarray == -2)[0])\n goal = tuple(np.argwhere(world_ndarray == -3)[0])\n\n world_ndarray[world_ndarray == -2] = 0\n world_ndarray[world_ndarray == -3] = 0\n\n world_tuple = tuple(map(tuple, world_ndarray))\n\n def h_custom_i(cur, end, obstacle):\n ytop, ybot, minx = obstacle\n cur_y, cur_x = cur\n end_y, end_x = end\n obs_bot = np.where(world_ndarray[ybot] == -1)[0][0]\n mid_y = ybot + (ytop - ybot) // 2\n if cur_y in range(ybot, ytop) and cur_x in range(max(obs_bot, start[1]), end_x):\n return 5000 - abs(minx - cur_x) ** 2 - abs(cur_y - mid_y) ** 2\n return abs(cur_x - end_x) + abs(cur_y - end_y)\n\n pr_queue = [] # Use heapqueue as priority queue\n heappush(pr_queue, (0 + h_custom_i(start, goal, obs_map[1]), 0, \"\", start))\n visited = set() # Each element has to be unique in a set\n graph = get_neighbors(world_tuple)\n route_str = \"\"\n\n while pr_queue:\n _, cost, path, current = heappop(pr_queue)\n if current == goal:\n route_str = path\n break\n if current in visited:\n continue\n visited.add(current)\n for direction, neighbour in graph[current].iteritems():\n heappush(pr_queue, (cost + h_custom_i(neighbour, goal, obs_map[1]), cost + 1, path + direction, neighbour))\n world_ndarray[neighbour] = cost + 1\n\n # print \"Expanded nodes(A*+Custom H): \", len(visited), \" Path length: \", len(route_str)\n # Convert string directions to 2D(x,y) coordinates\n route_coord = [start]\n for p in route_str:\n route_coord.append(graph[route_coord[-1]][p])\n\n world_ndarray[start] = -2 # Mark the start and end coordinates again\n world_ndarray[goal] = -3\n\n return route_coord, world_ndarray, len(visited), len(route_str)", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n path_to_point = {}\n cost_to_point = {}\n\n # Get the start node\n start_node = problem.getStartState()\n fringe_node = [start_node]\n path_to_point[start_node] = []\n cost_to_point[start_node] = problem.getCostOfActions(path_to_point[start_node])\n\n goal_found = False\n\n while(not goal_found):\n #for i in range(100): \n nodes_to_expand = set()\n # get max value node in the fringe node\n min_val = float(\"inf\")\n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] < min_val:\n min_val = cost_to_point[one_node]\n \n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] == min_val:\n nodes_to_expand.add(one_node)\n fringe_node.remove(one_node)\n\n # Expand the fringe node \n for one_node in nodes_to_expand:\n path_to_parent = path_to_point[one_node]\n for nxt_node in problem.getSuccessors(one_node):\n pos = nxt_node[0]\n mv = nxt_node[1]\n # check if point already present in path to point\n prev_cost = float(\"inf\")\n if pos in cost_to_point:\n prev_cost = cost_to_point[pos]\n new_path = path_to_parent + [mv]\n if prev_cost > problem.getCostOfActions(new_path):\n path_to_point[pos] = new_path\n cost_to_point[pos] = problem.getCostOfActions(new_path)\n fringe_node.append(pos)\n\n # Check if destination is reached in the fringe node\n for one_node in fringe_node:\n if problem.isGoalState(one_node):\n final_node = one_node\n goal_found = True\n break\n \n #print(len(fringe_node))\n print(final_node)\n print(path_to_point[final_node])\n return path_to_point[final_node] \n\n 
util.raiseNotDefined()", "def waStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE FOR TASK 2 ***\"\n\n priorityFunc = lambda x: x[2] + 2*heuristic(x[0], problem)\n\n # initialize a priority queue\n open = util.PriorityQueue()\n closed = []\n consistencyCheck = []\n\n # Retrieve the init state\n init = (problem.getStartState(), ['Stop'], 0)\n open.push(init, priorityFunc(init))\n while not open.isEmpty():\n currNode = open.pop()\n currState = currNode[0]\n currPath = currNode[1]\n currPathCost = currNode[2]\n\n h0 = heuristic(currState, problem)\n\n if problem.isGoalState(currState):\n print(\"consistent? :\", consistencyCheck.count(True) == len(consistencyCheck))\n return currPath[1:]\n else:\n closed.append(currState)\n successors = problem.getSuccessors(currState)\n\n if len(successors) > 0:\n for each in successors:\n newState = each[0]\n newPathCost = currPathCost + each[2]\n h1 = heuristic(newState, problem)\n consistencyCheck.append(h0-h1 <= 1)\n if newState not in closed:\n temp = (each[0], currPath + [each[1]], newPathCost)\n open.update(temp, priorityFunc(temp))\n\n return False", "def determineNextMove(player_location, opponentLocation, coins):\n global route, currentcoin, meta_route, best_weight, best_path, coins_to_search, index\n if opponentLocation in coins_to_search:\n coins_to_search, meta_route, route = change_way(coins, opponentLocation, player_location)[:3]\n index = 0\n elif currentcoin == player_location: \n if len(route) != 0:\n old_dist = algo.dijkstra(mazeMap, player_location)[1][meta_route[index+1]]\n coins_to_search2, meta_route2, route2, new_dist = change_way(coins, opponentLocation, player_location)\n\n #dist_matrix, route_matrix = u.update_dists_from_each(dists_matrix, routes_matrix, player_location, mazeMap, coins)\n #coins_to_search = get_n_shortest(3, coins, player_location, dists_matrix)\n \t\n #ennemy_dists = algo.dijkstra(mazeMap, opponentLocation)\n #for c in coins_to_search:\n #if len(coins_to_search) >= 2 and ennemy_dists[1][c] < dists_matrix[player_location][c]:\n # coins_to_search.remove(c)\n #break\n \t\t\n #best_weight = float(\"inf\")\n #best_path = []\n #exhaustive(coins_to_search, player_location, [], 0, dist_matrix)\n #meta_route2 = [player_location] + best_path\n #route2 = u.location_list_to_route(meta_route2, route_matrix)\n #new_dist = dist_matrix[player_location][meta_route2[1]]\n\t\t\n if len(route) == 0 or old_dist - new_dist > 3:\n route = route2\n meta_route = meta_route2 \n index = 0\n index += 1\n currentcoin = meta_route[index]\n #api.debug(route)\n return u.direction(player_location, route.pop(0))", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n\n templist=[]\n explored = set()\n fringe = util.PriorityQueue()\n # state, list of directions till now and the cost is pushed in the stack\n # so that algorithm can explore the node with lowest cost first\n fringe.push((problem.getStartState(),templist),1)\n\n while (not fringe.isEmpty()):\n (currentNode,currDir) = fringe.pop()\n\n if problem.isGoalState(currentNode):\n pathToGoal = currDir\n break\n if not (currentNode in explored):\n explored.add(currentNode)\n for childNode in problem.getSuccessors(currentNode):\n # total cost is cost till now plus cost to the child node\n totalCost = childNode[2]+problem.getCostOfActions(currDir)\n fringe.push((childNode[0],currDir+[childNode[1]]),totalCost)\n\n\n\n\n return pathToGoal;", "def astar_multi(maze):\n graph_ = Graph(maze.getObjectives())\n\n pq = []\n visited = {}\n\n goals = maze.getObjectives()\n 
start = maze.getStart()\n\n tie = 1\n #\n # tuple = (f,g,h,x&y,tiebreaker, goals left, currpath, visited)\n # h = min_manhattan(goals, start)\n h = mst_heur(start, goals, graph_)\n\n curr = (h, 0, h, start, goals, 0, [])\n heapq.heappush(pq, curr)\n\n food = None\n while len(pq) > 0:\n curr = heapq.heappop(pq)\n # print(\"curr:\", curr)\n if curr[3] in curr[4]:\n curr[4].remove(curr[3])\n if len(curr[4]) == 0:\n # print(\"DONE\")\n # print(food)\n food = curr\n break\n neighbors = maze.getNeighbors(curr[3][0], curr[3][1])\n for n in neighbors:\n curr_goals_left = curr[4].copy()\n curr_visited = curr[6].copy()\n tie += 1\n\n # print(\"curr[6]: \", curr[6])\n # print(\"n: \", n)\n # print\n\n # h2 = min_manhattan(curr[4], n)\n h2 = mst_heur(n, curr[4], graph_)\n f2 = h2 + curr[1]\n g2 = curr[1] + 1\n\n node_new = (f2, g2, h2, n, curr_goals_left, tie, curr_visited)\n\n if node_new[3] not in visited or node_new[4] not in visited[node_new[3]][1]:\n if node_new[3] not in visited:\n visited[node_new[3]] = (node_new[3], [])\n visited[node_new[3]][1].append(node_new[4])\n node_new[6].append(curr[3])\n heapq.heappush(pq, node_new)\n\n if food is None:\n return []\n\n food[6].append(food[3])\n\n return food[6]", "def heuristic(state, problem):\n # It would take a while for Flat Earther's to get accustomed to this paradigm\n # but hang in there.\n current = problem.G.node[state.state]\n final = problem.G.node[problem.end_node]\n clon = (current['x'], 0, 0)\n clat = (current['y'], 0, 0)\n flon = (final['x'], 0, 0)\n flat = (final['y'], 0, 0)\n hn = util.points2distance((clon, clat), (flon, flat))\n return hn\n # util.raiseNotDefined()", "def waStarSearch2(problem, heuristic=nullHeuristic):\n priorityFunc = lambda x: x[2] + 2*heuristic(x[0], problem)\n\n # initialize a priority queue\n open = util.PriorityQueue()\n\n # Retrieve the init state\n init = (problem.getStartState(), ['Stop'], 0)\n open.push(init, priorityFunc(init))\n bestG = {}\n while not open.isEmpty():\n\n currNode = open.pop()\n currState = currNode[0]\n currPath = currNode[1]\n currPathCost = currNode[2]\n\n if problem.isGoalState(currState):\n return currPath[1:]\n successors = problem.getSuccessors(currState)\n\n if len(successors) > 0:\n for each in successors:\n newPos = each[0]\n newPathCost = currPathCost + each[2]\n if newPos not in bestG.keys() or newPathCost < bestG[newPos]:\n bestG[newPos] = newPathCost\n temp = (each[0], currPath + [each[1]], newPathCost)\n hval = heuristic(each[0], problem)\n if hval < float('inf'):\n open.update(temp, priorityFunc(temp))\n\n return False", "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n from game import Actions\n\n waiting_list = util.PriorityQueue()\n COSTS = {}\n start_state = problem.getStartState()\n COSTS[start_state] = 0\n waiting_list.push(start_state,0)\n parents = {}\n \n while not waiting_list.isEmpty():\n q_state = waiting_list.pop()\n if problem.isGoalState(q_state):\n target_state = q_state\n break\n for child in problem.getSuccessors(q_state):\n n_cost = COSTS[q_state] + child[2]\n \n if child[0] not in COSTS or n_cost < COSTS[q_state]:\n COSTS[child[0]] = n_cost\n prior = n_cost + heuristic(child[0], problem)\n waiting_list.push(child[0], prior)\n parents[child[0]] = q_state\n\n sequence = []\n prev_state = target_state\n while target_state in parents.keys():\n target_state = parents[target_state]\n direction = Actions.vectorToDirection([prev_state[0] - target_state[0], prev_state[1] - target_state[1]])\n prev_state = target_state\n 
sequence.append(direction)\n \n return sequence[::-1]", "def heuristic(board, node_coordinate, start_coordinate, goal_coordinate):\r\n # The cost is initially just the Manhattan distance\r\n cost = manhattan_distance(node_coordinate, goal_coordinate)\r\n\r\n # If a node is adjacent to a gate on the CircuitBoard its cost is\r\n # drastically increased. This excludes the start and goal gates.\r\n adjacents = board.get_adjacent_coordinates(node_coordinate, cube=True)\r\n if goal_coordinate in adjacents:\r\n adjacents.remove(goal_coordinate)\r\n if start_coordinate in adjacents:\r\n adjacents.remove(start_coordinate)\r\n if any(adjacent in adjacents for adjacent in board.gate_coordinates):\r\n cost += 1000\r\n\r\n # 3/4 of amount of non-empty spots in the node's layer is added to its cost\r\n # to discourage overcrowding single layers.\r\n nonzeros = np.count_nonzero(board.board[node_coordinate[0]])\r\n cost += nonzeros * 0.75\r\n\r\n # To encourage moving upwards, each layer has a weight which decreases the\r\n # cost with a factor of the weight.\r\n height_weights = [1, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3]\r\n cost *= height_weights[node_coordinate[0]]\r\n return cost", "def second_heuristic(self):\r\n directions = [[-1, -1], [-1, 1], [1, 1], [1, -1]]\r\n # aceasta matrice indica valoarea pe care o are mutarea unei piese pe o celula aleasa\r\n # se va aduna la media ponderilor adunate in lista weights\r\n\r\n # mijlocul tablei este punctul cel mai vulnerabil\r\n # in timp ce lateralele sunt sigure,iar linia bazei transforma piesa in rege\r\n\r\n points = [[0, 4, 0, 4, 0, 4, 0, 4],\r\n [4, 0, 3, 0, 3, 0, 3, 0],\r\n [0, 3, 0, 2, 0, 2, 0, 4],\r\n [4, 0, 2, 0, 1, 0, 3, 0],\r\n [0, 3, 0, 1, 0, 2, 0, 4],\r\n [4, 0, 2, 0, 1, 0, 3, 0],\r\n [0, 3, 0, 2, 0, 2, 0, 4],\r\n [4, 0, 4, 0, 4, 0, 4, 0]]\r\n\r\n weights = [0 for i in range(4)]\r\n whites, blacks = 0, 0\r\n for i in range(8):\r\n for j in range(8):\r\n\r\n # numaram discurile de fiecare culoarea\r\n blacks += 1 if self.matrix[i][j] in ['N', 'n'] else 0\r\n whites += 1 if self.matrix[i][j] in ['A', 'a'] else 0\r\n\r\n if self.matrix[i][j] in [self.current_player, self.current_player.upper()]:\r\n\r\n # daca e piesa normala\r\n if self.matrix[i][j] == self.current_player:\r\n weights[0] += 4\r\n\r\n # cat de aproape este piesa de a deveni rege ( nr de linii din tabla - cate mai are pana ajunge pe ultima linie)\r\n\r\n # cu cat se apropie piesa mai multe de a deveni rege, scorul creste( negru - rege pentru i=0, alb -rege pentru i =7)\r\n if self.matrix[i][j] == 'n':\r\n weights[1] += (7 - i)\r\n elif self.matrix[i][j] == 'a':\r\n weights[1] += i\r\n else:\r\n # daca e piesa rege\r\n weights[0] += 8\r\n\r\n # cat de aproape este piesa rege de celelalte piese\r\n for d in directions:\r\n if self.matrix[i][j] == self.current_player.upper():\r\n # gaseste pe diagonala in directia d, o piesa adversara,daca exista\r\n x, y = self.find_piesa(i, j, d)\r\n if x and y:\r\n weights[2] += (x - i) * (x - i) + (y - j) * (y - j)\r\n vx = d[0] + i\r\n vy = d[1] + j\r\n back_x = i - d[0]\r\n back_y = j - d[1]\r\n next_x, next_y = vx + d[0], vy + d[1]\r\n # piesele pe care le poate captura jucatorul, daca e piesa rege are un scor mai mare\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(next_x, next_y) and self.matrix[next_x][next_y] == '.':\r\n if self.matrix[next_x][next_y] == self.opponent().upper():\r\n weights[3] += 7\r\n else:\r\n weights[3] += 4\r\n # piese care pot fi capturate; la fel daca 
este piesa rege atunci se scade mai mult scorul\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(back_x, back_y) and self.matrix[back_x][back_y] == '.':\r\n if self.matrix[vx][vy] == self.opponent().upper():\r\n weights[3] -= 6\r\n else:\r\n weights[3] -= 3\r\n # adunam piesa la media sumei date pentru a face AI-ul in caz de egalitate a scorului\r\n # sa imi aleaga piesa care ma pozitioneaza mai bine\r\n if self.move:\r\n return sum(weights) / 4 + points[self.move[0]][self.move[1]]\r\n return sum(weights) / 4\r\n\r\n def __str__(self):\r\n s = ' '\r\n for i in range(8):\r\n s += str(i) + ' '\r\n s += '\\n'\r\n for index, line in enumerate(self.matrix):\r\n s += str(chr(index + ord('a'))) + ' '\r\n for el in line:\r\n s += str(el) + ' '\r\n s += '\\n'\r\n\r\n return s", "def aStarSearch(problem, heuristic=nullHeuristic):\n stack = PriorityQueue()\n\n visited = []\n parent_dict = dict()\n start_state = problem.getStartState()\n stack.push(start_state, 0)\n actions_dict = dict()\n final_actions = []\n discovered = [problem.getStartState]\n cost_dict = dict()\n h_dict = dict()\n g_dict = dict()\n\n h_dict[start_state] = heuristic(start_state, problem)\n g_dict[start_state] = 0\n cost_dict[start_state] = 0\n parent_dict[start_state] = (420, 420)\n cost_dict[(420, 420)] = 0\n\n if problem.isGoalState(problem.getStartState()):\n return []\n\n while not stack.isEmpty():\n current_state = stack.pop()\n\n if current_state not in visited:\n\n visited.append(current_state)\n\n if problem.isGoalState(current_state):\n break\n successors = problem.getSuccessors(current_state)\n for s in successors:\n if s[0] not in visited:\n if s[0] not in cost_dict:\n h_dict[s[0]] = heuristic(s[0], problem)\n g_dict[s[0]] = g_dict[current_state] + s[2]\n cost_dict[s[0]] = g_dict[s[0]] + h_dict[s[0]]\n stack.push(s[0], cost_dict[s[0]])\n parent_dict[s[0]] = current_state\n actions_dict[(current_state, s[0])] = s[1]\n discovered.append(s[0])\n elif heuristic(s[0],problem) + g_dict[current_state] + s[2] < cost_dict[s[0]]:\n h_dict[s[0]] = heuristic(s[0], problem)\n g_dict[s[0]] = g_dict[current_state] + s[2]\n cost_dict[s[0]] = g_dict[s[0]] + h_dict[s[0]]\n stack.push(s[0], cost_dict[s[0]])\n parent_dict[s[0]] = current_state\n actions_dict[(current_state, s[0])] = s[1]\n\n while current_state is not start_state:\n parent = parent_dict[current_state]\n final_actions.append(actions_dict[parent, current_state])\n current_state = parent\n\n final_actions.reverse()\n return final_actions", "def calc_heuristic(self, state):\n h = 0\n board = state.board.array\n\n for i in range(self._n):\n for j in range(self._n):\n\n if board[i][j] != space_rep:\n tile_as_number = board[i][j]\n correct_x = (tile_as_number - 1) // self._n\n correct_y = (tile_as_number - 1) % self._n\n else:\n continue\n h += calc_diffs(i, j, correct_x, correct_y)\n return h", "def __init__(self,start,goal,theta_s,clearance,radius,rpm1,rpm2):\r\n #clearance = 5\r\n #radius = 10\r\n self.padding = clearance + radius\r\n self.ground_truth={}\r\n\r\n self.obstacle=[]\r\n self.rpm1=rpm1\r\n self.rpm2=rpm2\r\n self.expanded=[]\r\n\r\n self.parent=[]\r\n self.parent_orignal_data={}\r\n \r\n self.start=start\r\n #print(self.start)\r\n self.theta=theta_s\r\n self.theta_diff=30\r\n self.n=int(self.theta)\r\n self.frontier={}\r\n self.frontier[self.start[0],self.start[1],self.n]=0\r\n self.start_score=self.string(self.start[0],self.start[1],self.n)\r\n self.frontier_string=[]\r\n self.cost_togo={}\r\n 
self.cost_togo[self.start_score,self.n]=0\r\n self.parent_orignal_data[self.start_score]=None\r\n self.cost={}\r\n #self.cost=0\r\n self.goal=goal\r\n self.cost[self.start_score,self.n]=self.cost_togo[self.start_score,self.n]+self.h(self.start[0],self.start[1],self.theta)\r\n #self.cost[self.start_score,self.n]=self.cost_togo[self.start_score,self.n]+self.h(self.start[0],self.start[1])\r\n self.data_with_string={}\r\n self.data_with_string[self.start_score]=self.start\r\n self.current_score=\"00\"\r\n self.i=1\r\n self.theta_diff=30\r\n #self.cost={}\r\n #self.cost[self.start_score]=0\r\n self.dt=0.2\r\n self.threshold=1\r\n self.maximum_size=999\r\n self.parent_pos=(self.start[1],self.maximum_size-self.start[0])\r\n self.image_p=np.zeros([int(floor((self.maximum_size+1))),int(floor((self.maximum_size+1))),(360)])\r\n self.action_rpm=[[0,rpm1],[rpm1,0],[rpm1,rpm1],[0,rpm2],[rpm2,0],[rpm2,rpm2],[rpm1,rpm2],[rpm2,rpm1]]\r\n self.action_index={}", "def aStarSearch(problem, heuristic=myHeuristic):\n\n #frontier = util.PriorityQueue()\n #startState = problem.getStartState()\n #startNode = (startState, ['East'], 0)\n #frontier.push(startNode, 0)\n\n #currentState, actions, currentCost = frontier.pop()\n #return ['West','West', 'West','West','South','South','East', 'South','South','West','West']\n\n fronteira = util.PriorityQueue()\n\n nohExplorado = [] #(state, cost)\n\n startState = problem.getStartState()\n nohInicial = (startState, [], 0) #(state, action, cost)\n\n fronteira.push(nohInicial, 0)\n\n while not fronteira.isEmpty():\n\n #pega o Noh de menor \"custo\" na fila\n curEstado, todasAcoes, curCusto = fronteira.pop()\n\n #Coloca Noh atual na lista de explorados\n nohAtual = (curEstado, curCusto)\n nohExplorado.append((curEstado, curCusto))\n\n if problem.isGoalState(curEstado):\n #print(todasAcoes)\n return todasAcoes\n\n else:\n #Lista de Sucessores (successor, action, stepCost) e examina cada um\n sucessores = problem.getSuccessors(curEstado)\n for sucEstado, sucAcao, sucCusto in sucessores:\n novaAcao = todasAcoes + [sucAcao]\n novoCusto = problem.getCostOfActions(novaAcao)\n novoNoh = (sucEstado, novaAcao, novoCusto)\n\n #Checa se o sucessor jah foi visitado\n jah_foi_explorado = False\n for explorado in nohExplorado:\n exEstado, exCusto = explorado\n if (sucEstado == exEstado) and (novoCusto >= exCusto):\n jah_foi_explorado = True\n\n #Se nao foi explorado, coloca na fronteira\n if not jah_foi_explorado:\n fronteira.push(novoNoh, novoCusto + heuristic(sucEstado, problem))\n\n\n return todasAcoes", "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n n = Directions.NORTH\n e = Directions.EAST\n\n result = []\n qu = util.PriorityQueue()\n visited = set([])\n current = (problem.getStartState(), \"\", 0)\n qu.update(current, 0)\n costs = {}\n parents = {}\n parents[problem.getStartState()] = (problem.getStartState(), \"\")\n\n while not qu.isEmpty():\n cost, current = qu.pop()\n visited.add(current[0])\n\n if problem.isGoalState(current[0]):\n result = current[0]\n break\n\n for each in problem.getSuccessors(current[0]):\n if each[0] not in visited:\n qu.update(each, cost + each[2] + heuristic(each[0], problem))\n if each[0] not in costs:\n costs[each[0]] = cost + each[2]\n parents[each[0]] = (current[0], each[1])\n elif costs[each[0]] > cost + each[2] + heuristic(each[0], problem):\n costs[each[0]] = cost + each[2] + heuristic(each[0], problem)\n parents[each[0]] = (current[0], 
each[1])\n\n path = []\n while parents[result][0] != result:\n path.append(parents[result][1])\n result = parents[result][0]\n\n path.reverse()\n result = []\n for each in path:\n if each == \"South\":\n result.append(s)\n elif each == \"West\":\n result.append(w)\n elif each == \"North\":\n result.append(n)\n elif each == \"East\":\n result.append(e)\n\n return result\n util.raiseNotDefined()\n\n util.raiseNotDefined()", "def heuristic(state, problem):\n # It would take a while for Flat Earther's to get accustomed to this paradigm\n # but hang in there.\n node1 = problem.G.node[state]\n node2 = problem.G.node[problem.end_node]\n xy1 = ((node1['x'],0,0), (node1['y'],0,0))\n xy2 = ((node2['x'],0,0), (node2['y'],0,0))\n return util.points2distance(xy1, xy2)\n # util.raiseNotDefined()", "def aStar(sx = 1.55, sy = 2.05, gx = 1.55, gy = 4.05, d_diagnoal = 14, d_straight = 10, grid_reso = 0.1, grid_width = 6, grid_height = 3):\n width = int(grid_width/grid_reso)\n height = int(grid_height/grid_reso)\n #TODO\n A_sx, A_sy = realPosTogridPos(sx, sy, grid_reso = grid_reso)\n A_gx, A_gy = realPosTogridPos(gx, gy, grid_reso = grid_reso)\n startNode = Node(A_sx,A_sy,None,0,0,0)\n goalNode = Node(A_gx,A_gy,None,0,0,0)\n # The set of nodes already evaluated\n closedSet = set()\n # The set of currently discovered nodes that are not evaluated yet.\n openSet = set()\n # Initially, only the start node is known.\n openSet.add(startNode)\n # For each node, which node it can most efficiently be reached from.If a node can be reached from many nodes, cameFrom will eventually contain the most efficient previous step.\n cameFrom = []\n # For each node, the cost of getting from the start node to that node.\n gScore = create_gScore(width, height)\n start_x = startNode.x\n start_y = startNode.y\n # The cost of going from start to start is zero.\n startNode.g_value = 0\n gScore[start_x][start_y] = 0\n # For each node, the total cost of getting from the start node to the goal by passing by that node. 
That value is partly known, partly heuristic.\n fScore = create_fScore(width, height)\n # For the first node, that value is completely heuristic.\n startNode.f_value = heuristic_cost_estimate(startNode, goalNode,d_diagnoal,d_straight)\n fScore[start_x][start_y] = heuristic_cost_estimate(startNode, goalNode,d_diagnoal,d_straight)\n while len(openSet) != 0:\n # current := the node in openSet having the lowest fScore[] value\n current = node_lowest_fScore(openSet)\n # If it is the item we want, retrace the path and return it\n if current.equal(goalNode):\n path = reconstruct_path(cameFrom, current) # path in real\n # print \"path\",path\n pathInReal = convertGridPathToReal(path, sx, sy, gx, gy, grid_reso = grid_reso) # path in grid\n return pathInReal\n\n openSet.remove(current)\n closedSet.add(current)\n current_neighbors = getNeighbors(current, width, height)\n current_neighbors_num = current_neighbors.shape[1]\n # for neighbor in current_neighbors:\n for index in range(current_neighbors_num):\n [neighbor_x,neighbor_y] = current_neighbors[:,index]\n neighbor = Node(neighbor_x,neighbor_y,None,np.inf,np.inf,np.inf)\n if neighbor_in_closedSet(neighbor,closedSet):\n continue\n if neighbor_not_in_openSet(neighbor,openSet):\t# Discover a new node\n openSet.add(neighbor)\n\n # The distance from start to a neighbor the \"dist_between\" function may vary as per the solution requirements.\n current_x = current.x\n current_y = current.y\n tentative_gScore = gScore[current_x][current_y] + dist_between(current, neighbor,d_diagnoal,d_straight)\n neighbor_x = neighbor.x\n neighbor_y = neighbor.y\n if tentative_gScore >= gScore[neighbor_x][neighbor_y]:\n continue\t\t# This is not a better path.\n\n neighbor.father = current\n cameFrom.append(neighbor)\n gScore[neighbor_x][neighbor_y] = tentative_gScore\n neighbor.g_value = tentative_gScore\n neighbor_f_value = gScore[neighbor_x][neighbor_y] + heuristic_cost_estimate(neighbor, goalNode,d_diagnoal,d_straight)\n fScore[neighbor_x][neighbor_y] = neighbor_f_value\n neighbor.f_value = neighbor_f_value\n return False", "def manhattan_distance(state, goal):\r\n hval = 0\r\n for index, value in enumerate(state):\r\n if value == 0: # Underestimate by excluding calculation of the blank tile\r\n continue\r\n abs_x = abs((co_ords[index])[0] - (co_ords[goal.index(value)])[0])\r\n abs_y = abs((co_ords[index])[1] - (co_ords[goal.index(value)])[1])\r\n hval += abs_x + abs_y\r\n return hval", "def astar(maze):\n # TODO: Write your code here\\\n\n start = maze.getStart()\n end = maze.getObjectives()[0] # 0 needed so it's not the list it's the end spot\n\n pq = [] # priority queue - filled with tuple of f, x&y, g(path distance from start)\n heapq.heappush(pq, (manhattan_distance(start, end), start, 0))\n\n visited = set()\n map_ = {}\n solvable = True\n at_collectible = None\n\n while len(pq) > 0:\n\n curr = heapq.heappop(pq)\n curr_pos = curr[1]\n\n if curr_pos == end:\n at_collectible = curr\n break\n\n neighbors = maze.getNeighbors(curr_pos[0], curr_pos[1])\n\n for n in neighbors:\n\n new_curr = (manhattan_distance(n, end) + curr[2] + 1, (n[0], n[1]), curr[2] + 1)\n\n if n not in visited and maze.isValidMove(n[0], n[1]):\n map_[new_curr] = curr\n heapq.heappush(pq, new_curr)\n visited.add(n)\n\n curr = at_collectible\n path = []\n while curr[1] != start:\n path.append(curr[1])\n curr = map_[curr]\n path.append(curr[1])\n path.reverse()\n\n return path", "def aStarSearch(problem, heuristic=nullHeuristic):\n \n pq = util.PriorityQueue()\n start = problem.getStartState()\n 
pq.push(start,heuristic(start,problem))\n cost_so_far = {}\n cost_so_far[start] = 0\n came_from = {}\n came_from[start] = (None,None)\n actions =[]\n\n while not pq.isEmpty() :\n current=pq.pop()\n if problem.isGoalState(current) :\n break\n neighbours = problem.getSuccessors(current)\n for (next,action,cost) in neighbours :\n new_cost = cost_so_far[current] + cost\n if next not in cost_so_far or new_cost < cost_so_far[next] :\n cost_so_far[next] = new_cost\n priority = new_cost + heuristic(next,problem)\n pq.push(next, priority)\n came_from[next] = (current,action)\n\n # exiting the while loop when current == goalstate , now time to trace back !\n while current != start :\n parent,action = came_from[current]\n actions.append(action)\n current = parent\n actions.reverse() \n return actions", "def ant_colony(map, alpha=3, beta=4, m=10, rho=0.2, q=1, its_max=20):\n n = len(map)\n tau = np.ones((n, n))\n eta = 1/map.D\n for i in range(n):\n eta[i, i] = 0\n paths_array = np.zeros((m, n), int)\n its = 0\n path_best = np.zeros((its_max, n), int)\n distance_best = np.zeros(its_max)\n\n while its < its_max:\n paths_length = np.zeros(m)\n for i in range(m):\n source = np.random.randint(n)\n visited = []\n unvisited = list(range(n))\n node_now = source\n node_next = -1\n paths_array[i, 0] = source\n\n for j in range(1, n):\n visited.append(node_now)\n unvisited.remove(node_now)\n prob_roulette = np.array([0]*n, dtype=float)\n for k in unvisited:\n prob_roulette[k] = (pow(tau[node_now, k], alpha)\n * pow(eta[node_now, k], beta))\n prob_roulette = prob_roulette/sum(prob_roulette)\n cum_roulette = prob_roulette.cumsum()\n cum_roulette -= np.random.uniform(0, 1)\n node_next = list(cum_roulette >= 0).index(True)\n paths_array[i, j] = node_next\n paths_length[i] += map.D[node_now, node_next]\n node_now = node_next\n paths_length[i] += map.D[node_now, source]\n\n if its == 0:\n distance_best[its] = paths_length.min()\n path_best[its] = paths_array[paths_length.argmin()].copy()\n else:\n if distance_best[its-1] < paths_length.min():\n distance_best[its] = distance_best[its-1]\n path_best[its] = path_best[its-1].copy()\n else:\n distance_best[its] = paths_length.min()\n path_best[its] = paths_array[paths_length.argmin()].copy()\n\n add_tau = np.zeros((n, n))\n\n for i in range(m):\n for j in range(n):\n row = paths_array[i, j]\n col = paths_array[i, (j+1) % n]\n add_tau[row][col] += q/paths_length[i]\n\n tau = (1 - rho)*tau + add_tau\n\n its += 1\n\n return Hamiltonian(path_best[-1], map)", "def heuristic(self):\r\n # 1.\r\n blacks, whites = 0, 0\r\n weights = [0 for _ in range(6)]\r\n directions = [[-1, -1], [-1, 1], [1, 1], [1, -1]]\r\n user_dir = directions[:2] if self.current_player == 'n' else directions[2:]\r\n for i in range(8):\r\n for j in range(8):\r\n blacks += 1 if self.matrix[i][j] in ['N', 'n'] else 0\r\n whites += 1 if self.matrix[i][j] in ['A', 'a'] else 0\r\n if self.matrix[i][j] == self.current_player or self.matrix[i][j] == self.current_player.upper():\r\n\r\n # numarul de piese rege\r\n if self.matrix[i][j] == self.current_player.upper():\r\n weights[1] += 7.75\r\n\r\n # numarul de piese normale\r\n else:\r\n weights[0] += 5\r\n\r\n # numarul de piese de pe baseline in functie de tipul de piesa\r\n # conform strategiilor de joc este o strategie buna sa ai cat mai multe\r\n # piesa pe baseline pentru a preveni creare de piese de tip rege ale adversarului\r\n if self.current_player in ['n', 'N']:\r\n if i == 7:\r\n weights[2] += 4\r\n elif self.current_player in ['a', 'A']:\r\n if i == 
0:\r\n weights[2] += 4\r\n\r\n # number of pieces in the middle of the board\r\n # likewise, a good strategy for attacking\r\n if 3 <= i <= 4 and 3 <= j <= 4:\r\n weights[3] += 2\r\n\r\n # number of vulnerable pieces\r\n # i.e. pieces that can be captured by the opponent on the next turn\r\n for d in user_dir:\r\n\r\n vx = d[0] + i\r\n vy = d[1] + j\r\n back_x = i - d[0]\r\n back_y = j - d[1]\r\n next_x, next_y = vx + d[0], vy + d[1]\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(back_x, back_y) and self.matrix[back_x][back_y] == '.':\r\n weights[4] -= 3\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(next_x, next_y) and self.matrix[next_x][next_y] == '.':\r\n # capturing a king piece is a better move\r\n if self.matrix[vx][vy] == self.opponent().upper():\r\n weights[5] += 10\r\n else:\r\n weights[5] += 7\r\n\r\n diff = (blacks - whites) if self.current_player == 'n' else (whites - blacks)\r\n # when fewer pieces remain, the AI adopts a more offensive tactic\r\n if blacks + whites <= 10:\r\n return sum(weights) + diff\r\n return sum(weights)", "def myHeuristic(state, problem=None):\n #print(\"myHeuristic\")\n #print(problem.isGoalState((1,1)))\n xy2 = problem.goal\n return abs(state[0] - xy2[0]) + abs(state[1] - xy2[1])", "def uniformCostSearch(problem):\n # Initialization\n startState = problem.getStartState()\n\n if problem.isGoalState(startState):\n return [] # No action needed\n\n closedSet = set()\n queue = util.PriorityQueue()\n queue.push((startState, None, 0), 0)\n cameFrom = dict() # Stores most efficient previous action\n gScore = dict() # Stores current cost from start\n gScore[startState] = 0\n\n # Search\n while queue.heap: # Do while open set is not empty\n (currentState, action, cost) = queue.pop()\n\n if problem.isGoalState(currentState):\n # Goal reached. 
Construct path\n path = util.Queue() \n \n # Backtrack to start state\n while currentState is not startState and currentState in cameFrom:\n currentState, action = cameFrom[currentState]\n path.push(action)\n\n return path.list\n\n # Expand current state\n closedSet.add(currentState) \n for successor in problem.getSuccessors(currentState):\n successorState, successorAction, successorCost = successor\n \n if successorState in closedSet:\n continue # Skip already expanded states\n \n # Initialize entries not already in dictionaries to a big number\n if currentState not in gScore:\n gScore[currentState] = 999999999999\n if successorState not in gScore:\n gScore[successorState] = 999999999999\n\n # Compare this path to best path\n gTentative = gScore[currentState] + successorCost\n if gTentative >= gScore[successorState]:\n continue # Not a better path\n\n # A better path is found, store this path\n cameFrom[successorState] = (currentState, successorAction)\n gScore[successorState] = gTentative # Store new cost\n # Update the priority queue\n queue.update(successor, gScore[successorState])", "def minMoves(maze, x, y):\n\n def maze_guard():\n \"\"\"Guard function to block oversized dimensions\"\"\"\n cell_guard = all([1 <= len(row) <= 100 for row in maze])\n row_guard = 1 <= len(maze) <= 100\n return cell_guard and row_guard\n\n def walk_maze(finish):\n \"\"\"Walks the maze, finding the shortest path including all coins.\n Finishes when it reaches the coordinate finish, a tuple with row and\n column numbers.\n \"\"\"\n i, j = (0, 0)\n result = -1\n weight = -1\n while nodes:\n i, j, path, coins = nodes.popleft()\n cell = maze[i][j]\n if (i, j) == finish:\n weight, result = check_result(coins, path, weight, result)\n elif cell != 1:\n adjacent_nodes(i, j, path, coins)\n\n return result\n\n def adjacent_nodes(i, j, path, coins):\n \"\"\"Adds the node in positions i, j, with its path added to\n accumulated path. 
The path is transformed into a binary\n number, i.e, 2 ** (i * n + j), being n the number of rows\n in the maze matrix.\n \"\"\"\n def neighbour(x, y):\n this_path = 2 ** (i * n + j)\n if not this_path & path:\n coin = coins + 1 if maze[i][j] == 2 else coins\n nodes.append((x, y, path + this_path, coin))\n\n coord = [(i + 1, j, i + 1 < n), (i - 1, j, i - 1 >= 0),\n (i, j + 1, j + 1 < m), (i, j - 1, j - 1 >= 0)]\n _ = [neighbour(x, y) for x, y, test in coord if test]\n\n if not maze_guard():\n return -1\n\n n = len(maze)\n m = len(maze[0])\n nodes = deque([(0, 0, 0, 0)])\n return walk_maze((x, y))", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n frontier = util.Queue()\n start = problem.getStartState()\n record = [] # gonna use dictionary to keep track of movements\n frontier.push(start)\n explored = [start]\n\n location = 0 # to remember which successor part im accessing\n action = 1\n\n while not frontier.isEmpty():\n current_location = frontier.pop()\n print(current_location)\n\n if problem.isGoalState(current_location):\n break\n\n\n for each in problem.getSuccessors(current_location):\n if each[location] not in explored:\n frontier.push(each[location])\n record.append({'From': current_location, 'To': each[location], 'By': each[action]})\n explored.append(each[location])\n\n while not problem.isGoalState(record[-1]['To']): # loop removes last couple of movements which don't lead to goal\n record.remove(record[-1])\n\n x = len(record)\n while x - 1 != 0: # loop clears out actions that dont come from previous position\n if record[x - 1]['From'] != record[x - 2]['To']: # starts from goal and works backwards\n record.remove(record[x - 2])\n x = len(record)\n else:\n x -= 1\n\n return [path['By'] for path in record]\n\n return []", "def breadthFirstSearch(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n node = problem.getStartState()\r\n if (problem.isGoalState(node)):\r\n return [] # no need to make any moves of the start state is goal\r\n start = (node, 'NoDirection',0)\r\n\r\n frontier_queue = Queue() # queue for frontier\r\n frontier_queue.push(start) # frontier consists of only the start state\r\n\r\n explored_nodes = set()\r\n explored_track = {start:None} # keep a track of parent, parent of root node is None\r\n\r\n while not frontier_queue.isEmpty():\r\n state = frontier_queue.pop() # pop the top element from the queue \r\n explored_nodes.add(state)\r\n\r\n if problem.isGoalState(state[0]):\r\n return get_track(explored_track, state)\r\n\r\n neighbors_state = problem.getSuccessors(state[0])\r\n for neighbor in neighbors_state: # neighbor will be something like this ((34, 15), 'South', 1)\r\n if neighbor not in frontier_queue.list and neighbor not in explored_nodes:\r\n frontier_queue.push(neighbor)\r\n explored_track[neighbor] = state\r\n\r\n\r\n def get_track(explored_track, state):\r\n from game import Directions\r\n track_history = [state]\r\n track_history_direction = []\r\n leaf = state\r\n while (explored_track[leaf]) != start:\r\n track_history.append(explored_track[leaf])\r\n leaf = explored_track[leaf]\r\n\r\n for j in range (len(track_history),-1,-1):\r\n this_step = track_history[j-1]\r\n this_step = this_step[1]\r\n track_history_direction.append(this_step)\r\n return track_history_direction[:-1]", "def utility(state:State,maximizing_player):\n best_move_score = -1\n #######################[Goal]#########################\n is_current_player_stuck = is_stuck(state,state.player_type)\n other_player = RIVAL if state.player_type == PLAYER else PLAYER\n # Check 
if stuck\n if is_current_player_stuck:\n if state.player_type == PLAYER:\n state.players_score[state.player_type] -= state.penalty_score\n else:\n state.players_score[state.player_type] += state.penalty_score\n return state.players_score[state.player_type] - state.players_score[other_player] \n ######################################################\n # Else\n #--------------------------------------------------\n ################# Available Steps #################\n #--------------------------------------------------\n player_available_steps = availables(state.board, state.locations[PLAYER])\n h1 = 4-player_available_steps\n h4 = player_available_steps\n #--------------------------------------------------\n ################# Fruits Distance #################\n #--------------------------------------------------\n h2 = -1\n if state.fruits_ttl > 0 and len(state.fruits_dict) > 0:\n min_fruit_dist = float('inf')\n for fruit_loc in state.fruits_dict:\n curr_fruit_dist = Manhattan(state.locations[state.player_type], fruit_loc)\n # Check what is the closest fruit reachable\n if curr_fruit_dist < min_fruit_dist and curr_fruit_dist <= state.fruits_ttl:\n other_player_fruit_dist = Manhattan(state.locations[other_player], fruit_loc)\n if curr_fruit_dist < other_player_fruit_dist:\n min_fruit_dist = curr_fruit_dist\n max_dist = len(state.board)+len(state.board[0])\n h2 = (max_dist*10.0/min_fruit_dist)+1 if min_fruit_dist < float('inf') else -1\n #--------------------------------------------------\n ################# Reachable Squrs #################\n #--------------------------------------------------\n reachables_player = reachables(state.board,state.locations[PLAYER])\n reachables_rival = reachables(state.board,state.locations[RIVAL])\n h3 = reachables_player - reachables_rival # We want more for us\n #--------------------------------------------------\n ################# Combine it all. #################\n #--------------------------------------------------\n if not state.half_game():\n w = 0.8 if h2 > 0 else 1\n best_move_score = w*(h1-h3) + (1-w)*h2 \n else:\n w = 0.7 if h2 > 0 else 1\n best_move_score = w*(h4+h3) + (1-w)*h2 \n\n best_move_score += state.players_score[state.player_type]\n return best_move_score", "def calculate_made_up_dist(self):\n\n # Ensure if current state equals goal, cost is only the current cost\n if self._goal_loc == self._current_loc:\n return self._current_cost\n\n # Distance is at least the Manhattan distance as cannot move diagonal\n estimated_distance = self.calculate_manhattan_dist()\n\n # Assume two board parts in the priority queue have the same weight.\n # For those board paths with higher actual cost and lower heuristic\n # cost, there is more assurance in the accuracy of the actual cost\n # than in the heuristic cost. Give a very small penalty (i.e. less\n # than one step) to prefer a path with a higher known cost than a\n # path with a higher heuristic cost.\n # Extract the number of portion of the move cost from the heuristic\n heuristic_cost = estimated_distance - self._current_cost\n # Heuristic cost penalty is normalized to a maximum of 0.1 steps\n # This is achieved by dividing the heuristic cost by the size of the\n # board. Since the heuristic cost can never be larger than the board\n # size, this quotient is less than or equal to 1. To normalize to a\n # maximum of 0.1, just multiply the number by 0.1. 
This is than added\n # to the estimated distance determined so far.\n heuristic_cost_penalty = 0.1 * heuristic_cost\n heuristic_cost_penalty /= BoardPath._traversed_board_size\n # Add what is essentially an \"uncertainty penalty\"\n estimated_distance += heuristic_cost_penalty\n\n # In case where all neighboring spaces are blocked or already\n # traversed, then set the path cost prohibitively large so it is\n # given minimum priority.\n if not (self.is_move_valid(\"d\", BoardPath._traversed_board)) \\\n and not (self.is_move_valid(\"u\", BoardPath._traversed_board)) \\\n and not (self.is_move_valid(\"l\", BoardPath._traversed_board)) \\\n and not (self.is_move_valid(\"r\", BoardPath._traversed_board)):\n # Total board area is sufficient as a prohibitive distance\n estimated_distance += BoardPath._traversed_board_size\n return estimated_distance\n\n # If all next steps that load directly to the goal are blocked, then\n # it takes at least two additional moves to get around the blocked\n # paths it (due to an obstacle or already traversed square) so add\n # two to the estimated distance to include that cost.\n if self._is_all_direct_next_moves_blocked(BoardPath._traversed_board):\n estimated_distance += 2\n\n # In a heap, if two nodes have the same cost, the object that was\n # put into the heap first in many implementations will be on top of the\n # heap. To make the algorithm more efficient, apply a slight penalty to\n # a non valid solution to ensure if an invalid solution and a valid\n # solution have the same cost that the valid solution would always be\n # on top of the heap. This is done by giving all non-valid solutions a\n # penalty term that is greater than zero and less than the minimum step\n # size (e.g. in this case 0 < 0.1 < 1).\n estimated_distance += 0.1\n\n # Return estimated distance\n return estimated_distance", "def solve(self):\n smallest_f = self.get_smallest_f_cost_unvisited_node()\n smallest_f_node = smallest_f[0]\n\n if smallest_f[1] > 1:\n current_node = self.get_smallest_h_cost_unvisited_node()\n else:\n current_node = smallest_f_node\n if current_node.f_cost == self.inf:\n return\n\n self.set_h_cost(current_node)\n self.unvisited_pos.remove(current_node.pos)\n self.visited_pos.append(current_node.pos)\n neighbours = algo_utils.get_neighbours(current_node, self.grid, self.wall_pos)\n\n for neigh in neighbours:\n neighbour_dist = neigh.g_cost\n current_dist = current_node.g_cost\n new_dist = current_dist + 1\n if neighbour_dist < new_dist:\n continue\n neigh.g_cost = new_dist\n self.set_h_cost(neigh)\n mix_neigh = {neigh.pos: neigh.g_cost}\n self.mix.update(mix_neigh)\n mix_current = {current_node.pos: current_node.g_cost}\n self.mix.update(mix_current)\n\n smallest_f = self.get_smallest_f_cost_unvisited_node()\n smallest_f_node = smallest_f[0]\n smallest_h_node = self.get_smallest_h_cost_unvisited_node()\n\n if (\n self.end_pos not in self.unvisited_pos\n or algo_utils.get_smallest_g_cost_unvisited_node(\n self.grid, self.unvisited_pos\n ).g_cost\n == self.inf\n ):\n for key, value in self.mix.items():\n self.mix[key] = round((value * 1.0) / self.end_node.g_cost, 3)\n self.backtrack_path(self.end_node)\n else:\n if smallest_f[1] > 1:\n current_node = smallest_h_node\n else:\n current_node = smallest_f_node\n self.solve()", "def heuristic(state, puzzle):\n h = 0\n for i in range(puzzle.dimension):\n for j in range(puzzle.dimension):\n # (0, 0) -> 1 as value, (0, 2) -> 3 as value, etc\n value = i * puzzle.dimension + j + 1\n if value == puzzle.dimension ** 2: # 
value is ' '\n value = ' '\n current_position = puzzle.get_coordinates(state, value)\n goal_position = (i, j)\n h += util.manhattanDistance(current_position, goal_position)\n h /= 2\n return h", "def foodHeuristic(state, problem):\n util.raiseNotDefined()", "def compute_heuristic(self, state):\n if self._shape_reward_mode == \"optimal\":\n problem = self.problems[self._problem_idx]\n\n # Add action literals to state to enable planning\n state_lits = set(state.literals)\n action_lits = set(\n self.action_space.all_ground_literals(state, valid_only=False))\n state_lits |= action_lits\n\n problem_path = \"\"\n try:\n # generate a temporary file to hand over to the external planner\n fd, problem_path = tempfile.mkstemp(dir=TMP_PDDL_DIR, text=True)\n with os.fdopen(fd, \"w\") as f:\n problem.write(f, initial_state=state_lits, fast_downward_order=True)\n\n return get_fd_optimal_plan_cost(\n self.domain.domain_fname, problem_path)\n finally:\n try:\n os.remove(problem_path)\n except FileNotFoundError:\n pass\n else:\n return self._heuristic(state)", "def myHeuristic2(state, problem=None):\n #print(\"myHeuristic2\")\n #print(problem.isGoalState((1,1)))\n xy2 = problem.goal\n return ( (state[0] - xy2[0]) ** 2 + (state[1] - xy2[1]) ** 2 ) ** 0.5", "def astar(maze):\n # TODO: Write your code here\n gFunction = {}\n frontier = PriorityQueue()\n path = []\n ret = []\n objectives = maze.getObjectives()\n start = State(maze.getStart()[0], maze.getStart()[1], objectives[0])\n gFunction[start] = 0\n frontier.put(start)\n\n while not frontier.empty():\n\n currentState = frontier.get()\n currentCell = currentState.cell()\n\n # objective found, initialise backtrace and exit search\n if maze.isObjective(currentCell[0], currentCell[1]):\n\n path.append(currentState)\n ret.append(currentCell)\n break\n\n neighbors = maze.getNeighbors(currentCell[0], currentCell[1])\n\n for i in neighbors:\n\n neighbor = State(i[0], i[1], objectives[0])\n gVal= gFunction[currentState]+1\n\n # if neighbor is not visited or if we found better path to it, add it to the frontier\n if neighbor not in gFunction or gVal < gFunction[neighbor]:\n neighbor.setParent(currentState)\n gFunction[neighbor] = gVal\n hFunction = abs(objectives[0][0] - i[0]) + abs(objectives[0][1] - i[1]) # use manhatten distance as heuristic\n neighbor.setfFunction(gFunction[neighbor] + hFunction)\n frontier.put(neighbor)\n\n # backtrace\n while path[0]!= start:\n \n currentCell = path[0]\n path.insert(0, currentCell.parent())\n ret.insert(0, currentCell.parent().cell())\n\n return ret", "def customBreadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n i = 0\n dirList = []\n closed = util.Counter()\n fringe = util.Queue()\n state = problem.getStartState()\n followPac = []\n closed[hash(state)] = 1\n\n for triple in problem.getSuccessors(state):\n fringe.push((triple, dirList.copy()))\n while not fringe.isEmpty():\n i += 1\n state = fringe.pop()\n succ = state[0][0]\n act = state[0][1]\n cost = state[0][2]\n dirList = state[1]\n dirList.append(act)\n \n if problem.isGoalState(succ):\n return dirList\n if problem.isPacman(succ):\n followPac.append(dirList.copy())\n if closed[hash(succ)] == 0:\n closed[hash(succ)] = 1\n for triple in problem.getSuccessors(succ):\n fringe.push((triple, dirList.copy()))\n if not followPac:\n return\n followPac = max(followPac, key=lambda x: len(x))\n last = followPac.pop()\n followPac.append(last)\n followPac.append('place')\n followPac.append(reverse[last])\n return followPac.copy()", "def a_star(self, mapdata, start, goal):\n\n 
print \"Inside A star\"\n rospy.loginfo(\"Generate path from (%d,%d) to (%d,%d)\" % (start[0], start[1], goal[0], goal[1]))\n if not PathPlanner.is_cell_walkable(mapdata, goal[0], goal[1]):\n rospy.logerr(\"not walkable goal\")\n return[]\n #calculated from goal\n frontier = PriorityQueue()\n frontier.put(start, 0)\n came_from = {}\n cost_so_far = {}\n came_from[start] = None\n cost_so_far[start] = 0\n\n while not frontier.empty():\n frontier_msg = GridCells()\n frontier_cells = []\n for e in frontier.elements:\n frontier_cells.append(PathPlanner.grid_to_world(mapdata, e[1][0], e[1][1]))\n frontier_msg.header = mapdata.header\n frontier_msg.header.stamp = rospy.get_rostime()\n frontier_msg.cell_width = mapdata.info.resolution\n frontier_msg.cell_height = mapdata.info.resolution\n frontier_msg.cells = frontier_cells\n expanded_msg = GridCells()\n expanded_cells = []\n for e in cost_so_far: \n expanded_cells.append(PathPlanner.grid_to_world(mapdata, e[0], e[1]))\n \n expanded_msg.header = mapdata.header\n expanded_msg.header.stamp = rospy.get_rostime()\n expanded_msg.cell_width = mapdata.info.resolution\n expanded_msg.cell_height = mapdata.info.resolution\n expanded_msg.cells = expanded_cells\n self.expanded_pub.publish(expanded_msg)\n rospy.sleep(0.01)\n\n current = frontier.get()\n\n #creates path\n if current == goal:\n entry = goal\n listOfCoord = []\n while entry != None:\n listOfCoord.append(entry)\n entry = came_from[entry]\n listOfCoord.reverse()\n self.expanded_pub.publish(PathPlanner.createGridcells(mapdata, listOfCoord))\n return listOfCoord\n \n for next in PathPlanner.neighbors_of_8(mapdata, current[0], current[1]):\n new_cost = cost_so_far[current] + 1 #assume cost to move each unit is 1\n if next not in cost_so_far or new_cost < cost_so_far[next]:\n cost_so_far[next] = new_cost\n priority = new_cost + PathPlanner.euclidean_distance(next[0], next[1], goal[0], goal[1])\n frontier.put(next, priority)\n came_from[next] = current\n\n \n return[]", "def heuristic_1(node):\n x_node, y_node = node.state.location()\n goals = node.state.grid.components.white_walkers\n goals.append(node.state.grid.components.dragon_stone)\n distance = [np.sqrt((x_node - x)**2 + (y_node - y)**2) for x, y in goals]\n return distance[np.argmin(distance)]", "def a_star(grid, start, end, heuristic_cost=manhattan_heuristic_cost):\n # the set of cells already evaluated\n closed_set = set()\n\n # the set of cells already discovered\n open_set = set()\n open_set.add(start)\n\n # for each cell, mapping to its least-cost incoming cell\n prev = {}\n\n # for each node, cost of reaching it from start (g_cost)\n # for each node, cost of getting from start to dest via that node (f_cost)\n # note: cell->dest component of f_cost will be estimated using a heuristic\n g_cost = {}\n f_cost = {}\n for r in range(len(grid)):\n for c in range(len(grid[0])):\n cell = (r, c)\n g_cost[cell] = inf\n f_cost[cell] = inf\n g_cost[start] = 0\n f_cost[start] = heuristic_cost(start, end)\n\n while open_set:\n # node in open set with min fscore\n curr = node_with_min_fscore(open_set, f_cost)\n\n # if we've reached the destination\n if curr == end:\n return reconstruct_path_to_destination(prev, curr)\n\n open_set.remove(curr)\n closed_set.add(curr)\n\n for neighbor, cost in get_successors(curr, grid):\n # ignore neighbors which have already been evaluated\n if neighbor in closed_set:\n continue\n\n curr_g_score = g_cost[curr] + cost\n # add neighbor to newly discovered nodes\n if neighbor not in open_set:\n open_set.add(neighbor)\n\n # if 
we've already got a lower g_score for neighbor, then move on\n elif curr_g_score >= g_cost[neighbor]:\n continue\n\n prev[neighbor] = curr\n g_cost[neighbor] = curr_g_score\n f_cost[neighbor] = g_cost[neighbor] + heuristic_cost(neighbor, end)\n\n # if we get to this point, it's not possible to reach the end destination\n return []", "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n # Priority Queue to hold the node along with the path taken from the start node to reach that node\n pqueue = PriorityQueue()\n #Set to hold the node explored.\n explorednode = set()\n # Get the start node.\n startnode = problem.getStartState()\n # Push the starting node on the Queue along with an empty set to know the direction in order to reach the node.\n pqueue.push((startnode, []), 0)\n\n # Loop till the priority queue is empty\n while pqueue.isEmpty() is not True:\n # Pop the currentnode and the direction from the priority queue\n (currentnode, direction) = pqueue.pop()\n # Check if the currentnode is not in the explored node.\n if currentnode not in explorednode:\n # We will now add the node to set of explored node.\n explorednode.add(currentnode)\n # If the node is the goal. We made it!!\n if problem.isGoalState(currentnode):\n # The direction holds the way to reach till the goal from the start node.\n return direction\n # Loop for each successor(child) of the current node.\n for (successor, action, stepCost) in problem.getSuccessors(currentnode):\n # Add the successor to the queue along with the path to reach it.\n if successor not in explorednode:\n # Add the successor to the queue along with the path to reach it.\n pqueue.push((successor, direction + [action]), problem.getCostOfActions(direction + [action]) + heuristic(successor, problem))\n util.raiseNotDefined()", "def change_way(coins, opponentLocation, player_location):\n global best_weight, best_path\n dist_matrix, route_matrix = u.update_dists_from_each(dists_matrix, routes_matrix, player_location, mazeMap, coins)\n coins_to_search = get_n_shortest(5, coins, player_location, dists_matrix)\n ennemy_dists = algo.dijkstra(mazeMap, opponentLocation)\n for c in coins_to_search:\n if len(coins_to_search) >= 2 and ennemy_dists[1][c] < dists_matrix[player_location][c]:\n coins_to_search.remove(c)\n break\n best_weight = float(\"inf\")\n best_path = []\n api.debug(coins_to_search)\n exhaustive(coins_to_search, player_location, [], 0, dist_matrix)\n meta_route = [player_location] + best_path\n api.debug(meta_route)\n route = u.location_list_to_route(meta_route, route_matrix)\n \n return coins_to_search, meta_route, route, dist_matrix[player_location][meta_route[1]]", "def astar_corner(maze):\n # TODO: Write your code here\n gFunction = {}\n frontier = PriorityQueue()\n path = []\n ret = []\n objectives = maze.getObjectives()\n start = State(maze.getStart()[0], maze.getStart()[1], objectives)\n gFunction[start] = 0\n frontier.put(start)\n\n while not frontier.empty():\n\n currentState = frontier.get()\n currentCell = currentState.cell()\n objectivesLeft = currentState.objectives()\n\n if objectivesLeft.count(currentCell) != 0:\n objectivesLeft.remove(currentCell)\n\n # all objectives found, initialise backtrace and exit loop\n if len(objectivesLeft) == 0:\n path.clear()\n ret.clear()\n path.append(currentState)\n ret.append(currentCell)\n break\n\n neighbors = maze.getNeighbors(currentCell[0], currentCell[1])\n\n for i in neighbors:\n\n neighbor = State(i[0], i[1], objectivesLeft)\n gVal= gFunction[currentState] + 1\n\n # 
if neighbor is not visited or if we found better path to it, add it to the frontier\n if neighbor not in gFunction or gVal < gFunction[neighbor]:\n\n neighbor.setParent(currentState)\n gFunction[neighbor] = gVal\n\n hFunction = 0\n for j in objectivesLeft:\n hFunction += abs(j[0] - i[0]) + abs(j[1] - i[1]) # use sum of manhatten distances to corners as heuristic\n\n neighbor.setfFunction(gFunction[neighbor] + hFunction)\n frontier.put(neighbor)\n\n # backtrace\n while path[0]!= start:\n \n currentCell = path[0]\n path.insert(0, currentCell.parent())\n ret.insert(0, currentCell.parent().cell())\n\n return ret", "def AStar_Modified(maze: list, start: tuple, goal: tuple, alpha: float, q: float, fire_start: tuple):\n n = len(maze) # Get the dimension of the maze\n\n #========================================#\n # Some data checking statements\n\n if (not is_valid(start, n)):\n print(\"AStar_Modified: Start indices outside maze dimensions\")\n return False\n elif (not is_valid(goal, n)):\n print(\"AStar_Modified: Goal indices outside maze dimensions\")\n return False\n\n # End data checking statements\n #========================================#\n\n number_of_nodes_visited = 0\n # We can use a simple visited matrix since the heuristic (euclidean distance) is both admissible AND consistent\n visited = copy.deepcopy(maze) # We can use a copy of the maze to keep track of visited squares (Considered using a set here, thought that time efficiency was important)\n # visited = list(map(list, maze)) # Alternative to using copy.deepcopy\n\n g_cost = [[float('inf') for i in range(n)] for j in range(n)] # Initialize a matrix of the same size as maze where each value is 'infinity'.\n # f_cost = [[float('inf') for i in range(n)] for j in range(n)] # Initialize a matrix of the same size as maze where each value is 'infinity'.\n previous = [[None for i in range(n)] for j in range(n)] # Initialize a matrix of the same size as maze where each value is None.\n\n maze_future = copy.deepcopy(maze) #create maze that stores probabilities of where fire will be in next step\n maze_future = advance_fire_probability(maze_future,q) #calculate future probabilities\n\n heap = [] # Define our 'heap' which is just a list, but all pushes and pops will be through the heapq library.\n \n heapq.heappush(heap, (0, start)) # Push our start onto the heap. 
It's ok for this to have 0 'f' value since it'll be immediately popped off anyway.\n g_cost[start[0]][start[1]] = 0\n # f_cost[start[0]][start[1]] = euclidean_distance(start, goal)\n\n while (len(heap)): # While there exists items in the queue\n min_value = heapq.heappop(heap) # Pop the square with lowest 'f' value from our heap.\n number_of_nodes_visited += 1 # Increase number of nodes visited\n\n # if (visited[current[0]][current[1]] == False): # If we have not visited this node\n # visited[start[0]][start[1]] = 1 # Set it to visited\n\n current_f, current = min_value\n\n if (current == goal): # If current is the goal, we found it!\n # We now want to traverse back to make a path using our 'previous' matrix\n path = []\n while (current != None):\n path.append(current)\n current = previous[current[0]][current[1]]\n path.reverse()\n return (True, path, number_of_nodes_visited)\n\n current_i, current_j = current # Unpack the current pair\n \n # Now we want to add all unvisited squares that are possible to get to from the current square\n for i in range(len(nearby_offsets)):\n offset_i, offset_j = nearby_offsets[i]\n possible = (current_i + offset_i, current_j + offset_j)\n # print(f\"Current possible: {possible_i} {possible_j}\") # DEBUG\n if (is_valid(possible, n)): # If the calculated square is within the maze matrix\n if (maze[possible[0]][possible[1]]): # If there is something there\n continue\n # Check to see if this path is better (just need to check g_cost since h_cost is always the same)\n possible_g_cost = g_cost[current[0]][current[1]] + 1\n\n #KEY MODIFICATION: Increase cost of moving to a node if there is a probability it will be on fire next turn\n possible_g_cost += alpha*maze_future[possible[0]][possible[1]]\n if (possible_g_cost < g_cost[possible[0]][possible[1]]): # If the cost is indeed less\n previous[possible[0]][possible[1]] = current\n g_cost[possible[0]][possible[1]] = possible_g_cost\n # Check to see if the node is in the heap, and if it is not, put it in.\n if (not visited[possible[0]][possible[1]]):\n heapq.heappush(heap, (possible_g_cost + euclidean_distance(possible, goal), possible))\n visited[possible[0]][possible[1]] = 1\n \n # found = False\n # for (f_cost, (square_i, square_j)) in heap:\n # if (square_i == possible[0] and square_j == possible[1]):\n # found = True\n # break\n # if (not found):\n # heapq.heappush(heap, (possible_g_cost + euclidean_distance(possible, goal), possible))\n\n # if (visited[possible[0]][possible[1]]): # If this node has already been visited\n # # Check to see if this path is better (just need to check g_cost since h_cost is always the same)\n # if (f_cost[possible[0]][possible[1]] > possible_f_cost):\n # heapq.heappush(heap, (possible_f_cost, possible)) # Push this back onto the heap for re-examination\n # f_cost[possible[0]][possible[1]] = possible_f_cost # Assign the new f-cost\n # previous[possible[0]][possible[1]] = current # Update previous\n # else\n return (False, [], number_of_nodes_visited) # If the while loop goes out, and the queue is empty, then there is no possible path", "def h(self, state):\n loc = dict((val, (i, j)) for i, row in enumerate(self.goal_state)\n for j, val in enumerate(row))\n\n def calculate_cost(i, j):\n val = state[i][j]\n goal_i, goal_j = loc[val]\n return abs(goal_i - i) + abs(goal_j - j)\n\n return sum(\n calculate_cost(i, j) for i in xrange(self.size)\n for j in xrange(self.size) if state[i][j])", "def search(start):\n\n '''\n Create a class named nodeClass which contains 4 elements: \n state: The 
puzzle object containing the puzzle board at the node \n misplaced: num of misplaced tiles\n depth: depth of the node in the tree \n prev: parent node\n '''\n nodeClass = namedtuple('nodeClass', 'state, misplaced, depth, prev')\n\n #instantiate object from class creating the root node\n node = nodeClass(start, 0, 0, None)\n\n #stores the nodes that are going to be explored. \n #the node with lower f-score is explored first\n frontier = q.PriorityQueue()\n frontier.put((0,node))\n\n # frontier_set keep track of the nodes in the frontier queue\n frontier_set = {node}\n #contains the board states already explored\n explored_states = set()\n for ite in range(1,max_iterations+2):#while True:\n #Retrieve the node in the frontier with lowest value\n node = frontier.get()[1]\n\n #get the puzzle board obj from the node object\n state = node.state\n\n #Check if the game has ben solved\n if state.solved or ite==max_iterations:\n Result = namedtuple('Result', 'board, depth, nodesExpanded, max_depth, isSolved')\n return Result(state, node.depth, ite, max(no.depth for no in frontier_set), state.solved)\n\n # expanded nodes are added to explored set\n explored_states.add(state)\n\n #EXPANDING\n for mov in state.possible_moves:\n new_state=state.move(mov)\n new_node = nodeClass(new_state, new_state.score,\n node.depth + 1, node)\n\n #compute f-score of the node\n f_score=new_state.score + new_node.depth\n\n if new_state not in explored_states and new_node not in frontier_set:\n frontier.put((f_score,new_node))\n frontier_set.add(new_node)", "def use_manhatten_heur(self):\r\n\t\tdistance = 0\r\n\r\n\t\tfor row in range(self.n):\r\n\t\t\tfor col in range(self.n):\r\n\t\t\t\tintendedX, intendedY = BoardClass.goalTileLocations[self.board[row][col]]\r\n\t\t\t\tdistance += (abs(row - intendedX) + abs(col - intendedY))\r\n\r\n\t\tself.heuristic = distance", "def heuristic(self, state: ODState) -> int:\n h = 0\n if self.assigned_goals is None:\n for agent in state.new_agents:\n h += self.grid.get_heuristic(agent.coords, agent.color)\n for j in range(len(state.new_agents), len(state.agents)):\n h += self.grid.get_heuristic(state.agents[j].coords, state.agents[j].color)\n else:\n for agent in state.new_agents:\n h += self.grid.get_heuristic(agent.coords, self.assigned_goals[agent.id])\n for j in range(len(state.new_agents), len(state.agents)):\n h += self.grid.get_heuristic(state.agents[j].coords, self.assigned_goals[state.agents[j].id])\n return h", "def manhattan_heuristic_cost(curr, end):\n curr_x, curr_y = curr\n end_x, end_y = end\n return abs(curr_x-end_x) + abs(curr_y-end_y)", "def minimumEffortPath(self, heights: List[List[int]]) -> int:\n m, n = len(heights), len(heights[0])\n\n def diff(i, j, _i, _j):\n return abs(heights[i][j] - heights[_i][_j])\n\n max_diff = 0\n for i in range(m):\n for j in range(n):\n for dx, dy in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n _i, _j = i+dx, j+dy\n if 0<=_i<m and 0<=_j<n:\n max_diff = max(max_diff, diff(i, j, _i, _j))\n\n @lru_cache(None)\n def dfs(i, j, remain, k):\n if i == m-1 and j == n-1:\n return True\n for dx, dy in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n _i, _j = i+dx, j+dy\n if 0<=_i<m and 0<=_j<n:\n bit = 1<<(_i*n + _j)\n if remain&bit and diff(i, j, _i, _j) <= k:\n if dfs(_i, _j, remain^bit, k):\n return True\n return False\n\n def bisearch(s, e, func):\n while s <= e:\n p = s + (e-s)//2\n if func(p):\n e = p-1\n else:\n s = p+1\n return e+1\n\n return bisearch(0, max_diff, lambda k: dfs(0, 0, (1<<(m*n))-1, k))", "def myHeuristic3(state, problem=None):\n #canto 
=[(1,1), (1,2), (2,1), (2,2), (36,36), (35,36), (35,35), (36,35), (1,36),(1,35),(2,36),(2,35),(36,1),(36,2),(35,1),(35,2)]\n canto = []\n for l in range(2):\n for c in range(2):\n canto.append((l,c))\n heru = abs(state[0] - 1) + abs(state[1] - 1)\n #heru=10\n if state in canto:\n #print(\"sim\")\n heru = heru * 0.5\n return heru", "def get_shortest_path(self, r_start, r_goal):\n neighbors = [(0, 1), (0, -1), (1, 0), (-1, 0), (1, 1), (1, -1), (-1, 1), (-1, -1)]\n start = (int(r_start[0] / Map.RESOLUTION), int(r_start[1] / Map.RESOLUTION))\n goal = (int(r_goal[0] / Map.RESOLUTION), int(r_goal[1] / Map.RESOLUTION))\n close_set = set()\n came_from = {}\n gscore = {start: 0}\n fscore = {start: Map._heuristic(start, goal)}\n oheap = []\n\n heappush(oheap, (fscore[start], start))\n\n while oheap:\n current = heappop(oheap)[1]\n\n if current == goal:\n data = []\n while current in came_from:\n data.append((int((current[0] * Map.RESOLUTION) + (Map.RESOLUTION / 2)),\n int((current[1] * Map.RESOLUTION) + (Map.RESOLUTION / 2))))\n current = came_from[current]\n data.reverse()\n return data\n\n close_set.add(current)\n for i, j in neighbors:\n neighbor = current[0] + i, current[1] + j\n tentative_g_score = gscore[current] + Map._heuristic(current, neighbor)\n if 0 <= neighbor[0] < self.col_grid.shape[0]:\n if 0 <= neighbor[1] < self.col_grid.shape[1]:\n if self.col_grid[neighbor[0]][neighbor[1]] == 1:\n continue\n else:\n # array bound y walls\n continue\n else:\n # array bound x walls\n continue\n\n if neighbor in close_set and tentative_g_score >= gscore.get(neighbor, 0):\n continue\n if tentative_g_score < gscore.get(neighbor, 0) or neighbor not in [i[1] for i in oheap]:\n came_from[neighbor] = current\n gscore[neighbor] = tentative_g_score\n fscore[neighbor] = tentative_g_score + Map._heuristic(neighbor, goal)\n heappush(oheap, (fscore[neighbor], neighbor))\n\n return []", "def a_star_search(grid, start, end, heuristic_cost=euclidean_cost):\n # the set of cells already evaluated\n closed_set = set()\n\n # the set of cells already discovered\n open_set = set()\n open_set.add(start)\n\n # for each cell, mapping to its least-cost incoming cell\n prev = {}\n\n # for each node, cost of reaching it from start (g_cost)\n # for each node, cost of getting from start to dest via that node (f_cost)\n # note: cell->dest component of f_cost will be estimated using a heuristic\n g_cost = {}\n f_cost = {}\n for cell in product(range(len(grid)), range(len(grid[0]))):\n g_cost[cell] = inf\n f_cost[cell] = inf\n g_cost[start] = 0\n f_cost[start] = heuristic_cost(start, end)\n\n while open_set:\n # node in open set with min fscore\n curr = node_with_min_fscore(open_set, f_cost)\n\n # if we've reached the destination\n if curr == end:\n return reconstruct_path_to_destination(prev, curr)\n\n open_set.remove(curr)\n closed_set.add(curr)\n\n for neighbor, cost in get_successors(curr, grid):\n # ignore neighbors which have already been evaluated\n if neighbor in closed_set:\n continue\n\n curr_g_score = g_cost[curr] + cost\n # add neighbor to newly discovered nodes\n if neighbor not in open_set:\n open_set.add(neighbor)\n\n # if we've already got a lower g_score for neighbor, then move on\n elif curr_g_score >= g_cost[neighbor]:\n continue\n\n prev[neighbor] = curr\n g_cost[neighbor] = curr_g_score\n f_cost[neighbor] = g_cost[neighbor] + heuristic_cost(neighbor, end)\n\n # if we get to this point, it's not possible to reach the end destination\n return []", "def __init__(self, gameState, costFn = lambda x: 1, goal=(1,1), 
start=None, warn=True, visualize=True):\n self.goal=(1,1)\n self.goals=[]\n self.walls = gameState.getWalls()\n self.startState = gameState.getPacmanPosition()\n if start != None: self.startState = start\n\n n=0\n try:\n for j in range(1, 40):\n n=j\n x=gameState.hasWall(1, j)\n except:\n n=n\n m=0\n try:\n for i in range(1, 40):\n m=i\n x=gameState.hasWall(i, 1)\n except:\n m=m\n print('maze dimension: ',m,'x',n)\n\n for i in range(1,m):\n for j in range(1,n):\n if (gameState.hasFood(i,j)):\n if(gameState.getNumFood()==1):\n self.goal=(i,j)\n else:\n x=(i,j)\n self.goals.append(x)\n\n #print('goals',self.getFoodPositions())\n self.costFn = costFn\n self.visualize = visualize\n #x=getFoodPosition(gameState)\n #print(\"food positions: \" )\n print(\"[R12] Initial position of pacman is \"+str(gameState.getPacmanPosition()))\n print(\"[R10] Number of foods is \"+str(gameState.getNumFood()))\n if(gameState.getNumFood()>1):\n print(\"[R10] Final goal positions are \", self.goals)\n else:\n print(\"[R10] Final goal position is \"+str(self.goals))\n print(\"[R11] Ghost Positions is/are \"+str(gameState.getGhostPositions()))\n print(\"[R15] has the game food? \"+str(gameState.hasFood(*goal)))\n if warn and (gameState.getNumFood() != 1 or not gameState.hasFood(*goal)):\n print('Warning: this does not look like a regular search maze')\n\n # For display purposes\n self._visited, self._visitedlist, self._expanded = {}, [], 0 # DO NOT CHANGE", "def cons_heuristic(state, goal_state): \n unused_stacks = set(range(len(state.stack_containers))) - set(goal_state.keys())\n unused_index = list(unused_stacks)[0] if unused_stacks else -1\n cost = 0\n height_dict = {} # Dict for storing element and height index in goal state\n for i in range(len(state.stack_containers)):\n for element in state.stack_containers[i]:\n h_stack = get_heurisitc_cost_stacks(element, i, goal_state, unused_stacks, unused_index)\n cost += h_stack[0] \n if h_stack[1] > -1:\n height_dict[element] = h_stack[1] \n for i in range(len(state.stack_containers)):\n for j in range(len(state.stack_containers[i])):\n c = get_heuristic_cost_height(state.stack_containers[i][j], j, height_dict)\n cost += get_heuristic_cost_height(state.stack_containers[i][j], j, height_dict) \n return cost", "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n from util import PriorityQueue\n pq = PriorityQueue()\n # visited = []\n mapper = {}\n costs = {}\n start = problem.getStartState()\n mapper[start] = None\n costs[start] = 0\n pq.push(start, 0)\n\n while not (pq.isEmpty()):\n # print costs\n point = pq.pop()\n if problem.isGoalState(point):\n current = point\n l = []\n while mapper[current] != None:\n tup = mapper[current]\n l.append(tup[1])\n current = tup[0]\n l.reverse()\n print l\n return l\n for child in problem.getSuccessors(point):\n if not child[0] in mapper:\n cost = costs[point] + child[2]\n if (child not in costs) or (cost < costs[child[0]]):\n costs[child[0]] = cost\n full_cost = cost + heuristic(child[0], problem)\n pq.push(child[0], full_cost)\n mapper[child[0]] = point, child[1]", "def astarSearchWithoutAdmissible(self):\n\n self.frontier = PriorityQueueImpl(self.priorityByHeuristicFunction)\n self.explored = set()\n\n if self.__isTileGoalState(self.startingPoint):\n print(\"Path is found: \" + str(self.startingPoint) + \" with 0 cost\")\n return\n\n self.frontier.enqueue(TileWithHeuristic(self.startingPoint, [], 0, not self.isAllTileAdmissible))\n\n while self.frontier.isEmpty() != True:\n tile = 
self.frontier.dequeue()\n tileCoordinate = tile.coordinate\n tileCost = tile.cost\n\n if self.__isTileGoalState(tileCoordinate):\n self.__printThePath(tile)\n return\n\n self.explored.add(tile)\n\n adjacentList = self.__findAdjacentsToThisPoint(tileCoordinate)\n for eachPoint in adjacentList:\n if not self.__isTileWall(eachPoint):\n eachTile = TileWithHeuristic(eachPoint, tile.pathToTile, self.__getElementFromPairs(eachPoint),\n not self.isAllTileAdmissible)\n if self.__isTileGoalState(eachTile.coordinate):\n eachTile.heuristic = 0\n if not self.__isTileInExplored(eachTile):\n eachTile.cost = self.__getElementFromPairs(eachPoint) + tileCost + 1\n eachTile.heuristicFunction = eachTile.cost + eachTile.heuristic\n self.frontier.enqueue(eachTile)", "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n from util import PriorityQueue\n import math\n\n frontier = PriorityQueue()\n explored = []\n actions = []\n\n class node:\n def __init__(self, path, dad, action):\n self.path = path\n self.dad = dad\n self.action = action\n h = heuristic(path,problem.goal)\n if dad == None:\n self.g=0\n else:\n self.g = dad.g + heuristic(dad.path,path)\n self.cost = round(self.g + h,1)\n\n start = node(problem.getStartState(),None,'')\n frontier.push(start,start.cost)\n\n while frontier.isEmpty() == False:\n path = frontier.pop()\n successors = problem.getSuccessors(path.path)\n explored.append(path)\n for vertex in successors:\n achou = False\n for path_ex in explored:\n if vertex[0] == path_ex.path:\n achou = True\n\n if achou == False:\n successor = node(vertex[0],path,vertex[1])\n frontier.push(successor,successor.cost)\n if problem.isGoalState(successor.path):\n while len(explored) > 0:\n ant = explored.pop()\n if ant.path == successor.dad.path:\n actions.append(successor.action)\n successor = ant\n actions.reverse()\n return actions" ]
[ "0.7437132", "0.7243507", "0.7075924", "0.7064572", "0.6833929", "0.6753228", "0.6737725", "0.64962655", "0.6481526", "0.6475066", "0.6400677", "0.63954955", "0.63701344", "0.6343227", "0.63267416", "0.63048136", "0.6302781", "0.6302682", "0.6297269", "0.6265315", "0.62526554", "0.62487996", "0.6243192", "0.6236099", "0.62299854", "0.6227819", "0.6213307", "0.62056553", "0.62024444", "0.6195679", "0.61769426", "0.6170765", "0.61664593", "0.6143414", "0.61344296", "0.61318874", "0.6128335", "0.6109451", "0.6092398", "0.6087845", "0.60833627", "0.6062829", "0.60549104", "0.60540545", "0.60451984", "0.60393786", "0.6031747", "0.60295093", "0.6021779", "0.6012137", "0.5987797", "0.5975404", "0.597292", "0.5972251", "0.59650344", "0.59637517", "0.59619206", "0.5960058", "0.59506696", "0.5943358", "0.59307504", "0.59295106", "0.59239084", "0.59222823", "0.5909913", "0.590225", "0.58974266", "0.5896282", "0.5895817", "0.5894982", "0.5891505", "0.5888618", "0.5886265", "0.5880387", "0.5878018", "0.58772933", "0.58764315", "0.5874395", "0.58693993", "0.5858791", "0.5854229", "0.5840371", "0.5830153", "0.5825526", "0.5823574", "0.58172935", "0.58144736", "0.5814113", "0.58112043", "0.5808462", "0.57994735", "0.57986236", "0.579612", "0.5795232", "0.5790043", "0.5788558", "0.5785751", "0.57838196", "0.5781831", "0.5780236" ]
0.8600155
0
Total Manhattan Distance Heuristic (neither admissible nor consistent) (this heuristic is included more so to show the idea Best Path is based on, but it is often more effective than Hamming even if it isn't admissible). Gives the roomba the ability to pass through walls and to ignore the additional cost on carpet:
1. Find the closest dirty tile in Manhattan distance
2. Move the roomba to that closest dirty tile
3. Repeat 1-2 until all dirty tiles are clean
The heuristic is the total Manhattan distance accumulated if the roomba moves to the closest dirty tile every time.
def spotlessroomba_third_heuristic(state : SpotlessRoombaState) -> float:
    h = 0
    current_position = state.position
    dirty_locations = list(state.dirty_locations)
    partial_heuristic = INF
    closest_dirty = 0
    while dirty_locations:
        for i in range(len(dirty_locations)):
            manhattan = abs(current_position.row - dirty_locations[i].row) + abs(current_position.col - dirty_locations[i].col)
            if manhattan < partial_heuristic:
                partial_heuristic = manhattan
                closest_dirty = i
        h += partial_heuristic
        current_position = dirty_locations.pop(closest_dirty)
        partial_heuristic = INF
    return h
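A minimal usage sketch for the heuristic above, assuming stand-in SpotlessRoombaState and Coordinate classes and an INF constant (the names are taken from the snippet, but the definitions below are illustrative assumptions, not the assignment's actual code):

from dataclasses import dataclass
from typing import Tuple

INF = float('inf')

@dataclass(frozen=True)
class Coordinate:
    row: int
    col: int

@dataclass
class SpotlessRoombaState:
    position: Coordinate
    dirty_locations: Tuple[Coordinate, ...]

# Roomba at (0,0); dirty tiles at (1,1) and (4,4).
# Greedy nearest-tile walk: (0,0)->(1,1) costs 2, then (1,1)->(4,4) costs 6, so h = 8.
state = SpotlessRoombaState(Coordinate(0, 0), (Coordinate(1, 1), Coordinate(4, 4)))
print(spotlessroomba_third_heuristic(state))  # expected: 8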
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def manhattan_distance(self):\n dist = 0\n for target, tile in zip(self.winCdt[:-1], self.tiles[:-1]):\n dist += abs(target[0] - tile[0]) + abs(target[1] - tile[1])\n return dist", "def calculate_manhattan_dist(self):\n return self._current_cost + abs(self._current_loc.get_row() - self._goal_loc.get_row()) +\\\n abs(self._current_loc.get_column() - self._goal_loc.get_column())", "def heuristicManhattan(state):\n t = state.node.getTiles()\n tArray = [t[i:i+3] for i in range(0, 9, 3)]\n heuristik = 0\n for row in range(len(tArray)):\n for col in range(len(tArray[row])):\n if tArray[row][col] == 1:\n heuristik += abs(row) + abs(col - 1)\n elif tArray[row][col] == 2:\n heuristik += abs(row) + abs(col - 2)\n elif tArray[row][col] == 3:\n heuristik += abs(row - 1) + abs(col)\n elif tArray[row][col] == 4:\n heuristik += abs(row - 1) + abs(col - 1)\n elif tArray[row][col] == 5:\n heuristik += abs(row - 1) + abs(col - 2)\n elif tArray[row][col] == 6:\n heuristik += abs(row - 2) + abs(col)\n elif tArray[row][col] == 7:\n heuristik += abs(row - 2) + abs(col - 1) \n elif tArray[row][col] == 8:\n heuristik += abs(row - 2) + abs(col - 2)\n return heuristik", "def manhattan_distance(state, goal):\r\n hval = 0\r\n for index, value in enumerate(state):\r\n if value == 0: # Underestimate by excluding calculation of the blank tile\r\n continue\r\n abs_x = abs((co_ords[index])[0] - (co_ords[goal.index(value)])[0])\r\n abs_y = abs((co_ords[index])[1] - (co_ords[goal.index(value)])[1])\r\n hval += abs_x + abs_y\r\n return hval", "def use_manhatten_heur(self):\r\n\t\tdistance = 0\r\n\r\n\t\tfor row in range(self.n):\r\n\t\t\tfor col in range(self.n):\r\n\t\t\t\tintendedX, intendedY = BoardClass.goalTileLocations[self.board[row][col]]\r\n\t\t\t\tdistance += (abs(row - intendedX) + abs(col - intendedY))\r\n\r\n\t\tself.heuristic = distance", "def heuristic_manhattan_distance(self):\n distance = 0\n\n for i in range(self.PUZZLE_NUM_ROWS):\n for j in range(self.PUZZLE_NUM_COLUMNS):\n i1, j1 = self._get_coordinates(self.position[i][j], self.PUZZLE_END_POSITION)\n distance += abs(i - i1) + abs(j - j1)\n\n return distance", "def foodHeuristic(state, problem):\n import itertools\n\n\n\n def manhattan(startPosition, targetPosition):\n xy1 = startPosition\n xy2 = targetPosition\n return abs(xy1[0] - xy2[0]) + abs(xy1[1] - xy2[1])\n\n position, foodGrid = state\n\n return len(foodGrid.asList())\n #\n # \"\"\"\n # The below algorithm is from:\n # https://stackoverflow.com/questions/9994913/pacman-what-kinds-of-heuristics-are-mainly-used\n #\n # Find real/manhattan distance between two currently furthest fruits in labyrinth - let's call that x.\n # Find real/manhattan distance from current Pacman position to the closer of previous two fruits - let's call that y.\n # Then, answer is just: x + y.\n # The interpretation of this x + y formula could be something like this:\n #\n # x - either way, you will have to travel this distance, at least at the end\n # y - while you are at the some of the two furthest fruits, it's better to collect\n # the food that is near to it so you don't have to go back\n # \"\"\"\n # maxFoodPairDistance = 0\n #\n # if len(foodGrid.asList()) >= 2:\n #\n # #calculate manhattan/real distance between each pair of food (all permutations in foodGrid) and find the maximum of them, and\n # #store the pair with max distance in maxFoodPair\n # for foodPair in itertools.permutations(foodGrid.asList(),2):\n # #foodPairDistance = mazeDistance(foodPair[0], foodPair[1], problem.startingGameState)\n # 
foodPairDistance = manhattan(foodPair[0], foodPair[1])\n # if foodPairDistance >= maxFoodPairDistance:\n # maxFoodPairDistance = foodPairDistance\n # maxFoodPair = foodPair\n #\n # #get the real distance between pacman and nearest food among the max distance food pair we get above. Using real distance instead\n # #of manhattan distance here just to \"reduce\" the number of nodes expand to get additional point. But that's a bit of a cheating\n # #because the mazeDistance function use of breadth First search - which itself is a search with nodes expansion not counted here\n # #minPacmanToFoodDistance = min([mazeDistance(position, foodPosition, problem.startingGameState) for foodPosition in maxFoodPair])\n # minPacmanToFoodDistance = min([manhattan(position, foodPosition) for foodPosition in maxFoodPair])\n #\n # #When only one food left, just return the real distance between pacman and food\n # elif len(foodGrid.asList()) == 1:\n # foodPosition = foodGrid.asList()[0]\n # #minPacmanToFoodDistance = mazeDistance(position, foodPosition, problem.startingGameState)\n # minPacmanToFoodDistance = manhattan(position, foodPosition)\n # else:\n # minPacmanToFoodDistance = 0\n #\n # return minPacmanToFoodDistance + maxFoodPairDistance", "def manhattan_heuristic(state, problem=None):\n return util.manhattanDistance(state[0], problem.goal)", "def manhattanDistance(self):\n\n # Loop trough batteries and gridpoints calculate\n # manhattan distance between them\n for battery in self.batteries:\n for gridPoint in self.gridPoints:\n distance = (abs(gridPoint.xLocation - battery.xLocation)\n + abs(gridPoint.yLocation - battery.yLocation))\n gridPoint.manhattanDistance.append(distance)\n\n # If house on gridPoint, append distance to house\n for house in self.houses:\n if (house.xLocation == gridPoint.xLocation and\n house.yLocation == gridPoint.yLocation):\n house.manhattanDistance.append(distance)", "def get_manhattan_distance_heuristic(board):\n\n possible_moves = get_possible_moves(board)\n prioritized_moves = []\n\n # Prioritize each move and save tuples to list\n for move in possible_moves:\n nr = get_manhattan_distance(move)\n heappush(prioritized_moves, (nr, move.current_state))\n\n return prioritized_moves", "def spotlessroomba_second_heuristic(state : SpotlessRoombaState) -> float:\n # TODO a nontrivial consistent heuristic\n \n if not state.dirty_locations:\n return 0\n \n best_start = 0 # best dirty tile to start from\n best_cost = INF # cost of the path from the above start tile\n\n for i in range(len(state.dirty_locations)):\n estimate_cost = 0\n lowest_cost = INF\n closest_dirty = 0\n dirty_locations = list(state.dirty_locations)\n current_pos = dirty_locations.pop(i)\n\n # find the shortest cost solution path from this starting tile\n while dirty_locations:\n for j in range(len(dirty_locations)):\n manhattan = abs(current_pos.row - dirty_locations[j].row) + abs(current_pos.col - dirty_locations[j].col)\n if manhattan < lowest_cost:\n lowest_cost = manhattan\n closest_dirty = j\n estimate_cost += lowest_cost\n current_pos = dirty_locations.pop(closest_dirty)\n lowest_cost = INF\n # if estimated path cost is cheaper than best path cost so far, replace best_cost and best_start\n if estimate_cost < best_cost:\n best_cost = estimate_cost\n best_start = i\n # if estimated path cost and best path cost so far are equal, tiebreak with proximity to start tile\n if estimate_cost == best_cost:\n current_pos = state.position\n dist_to_prev_best = abs(current_pos.row - state.dirty_locations[best_start].row) + 
abs(current_pos.col - state.dirty_locations[best_start].col)\n dist_to_i = abs(current_pos.row - state.dirty_locations[i].row) + abs(current_pos.col - state.dirty_locations[i].col)\n if dist_to_i < dist_to_prev_best:\n best_start = i\n \n\n current_pos = state.position\n # Calculate distance to the best start tile\n dist_to_start = abs(current_pos.row - state.dirty_locations[best_start].row) + abs(current_pos.col - state.dirty_locations[best_start].col)\n # Returned heuristic is the sum of distance to the start tile and estimated cost from said tile\n return dist_to_start + best_cost", "def _mine_heuristic(bot, problem):\n if bot.contains(problem.get_block_id()):\n return 0\n\n bot_pos = bot.get_pos()\n dest_pos = problem.get_block_loc()\n\n # If man == dy: return man + 1\n # If man > dy: return man\n # If man < dy: return dy?\n man_dist = _manhattan((bot_pos.x, bot_pos.z), (dest_pos.x, dest_pos.z))\n y_diff = bot_pos.y - dest_pos.y\n if y_diff < 0:\n y_diff += 1\n\n if y_diff == 0:\n return man_dist\n\n # Transform so that it's only dropping\n drop = _DROP if y_diff > 0 else 1\n y_diff = abs(y_diff)\n\n drops = _drops(y_diff, drop)\n\n if man_dist > drops:\n return man_dist\n if man_dist == drops:\n return man_dist + 1\n if drop == 1:\n return drops\n if y_diff % drop == 1:\n return drops\n return drops + 1", "def manhattan_distance(self):\n return calculate_manhattan_distance(self.location, self.target_location)", "def calculate_manhattan_dist(state):", "def update_heuristic(self):\n self.heuristic = self.manhattan_distance()", "def manhattan_heuristic(pos, problem):\n return abs(pos[0] - problem.goal_pos[0]) + abs(pos[1] - problem.goal_pos[1])", "def heuristic(self):\n if self._dist < 0:\n self._dist = 0\n for pos, idx in enumerate(self.config):\n if idx != 0: # Skip blank\n self._dist += manhattan_dist(idx, pos, self.n)\n return self._dist", "def get_heuristic(self, state):\n\n def get_manhattan_distance(coord_a, coord_b):\n \"\"\"Returns the manhattan distance between coord_a and coord_b.\"\"\"\n return abs(coord_a.x - coord_b.x) + abs(coord_a.y - coord_b.y)\n\n \n def get_num_obstacles(coord_a, coord_b):\n \"\"\"Returns the number of obstacles (wriggler segments or walls) between\n coord_a and coord_b.\n \n This function assumes that coord_b is larger (in either/both x and y)\n than coord_a.\n \"\"\"\n obstacle_count = 0\n \n for x in range(coord_a.x, coord_b.x + 1):\n for y in range(coord_a.y, coord_b.y + 1):\n coord = Coordinate(x, y)\n if coord in self.wall_coords or coord in state:\n obstacle_count += 1\n \n return obstacle_count\n\n\n head_coord = state.wriggler_list[0].get_head()\n tail_coord = state.wriggler_list[0].get_tail()\n \n head_manhattan_distance = get_manhattan_distance(head_coord, self.goal_coord)\n tail_manhattan_distance = get_manhattan_distance(tail_coord, self.goal_coord)\n \n # Calculate and return heuristic value depending on which heuristic to use\n if self.heuristic == Heuristic.MANHATTAN_DIST:\n # Return the shortest Manhattan distance of wriggler0's tail or head to the goal\n return min(head_manhattan_distance, tail_manhattan_distance)\n \n else: # self.heuristic == Heuristic.NUM_OBSTACLES:\n # Return the number of obstacles between wriggler0's tail/head to the goal\n # The tail/head is selected based on which is closer to the goal\n if head_manhattan_distance <= tail_manhattan_distance:\n # The head is closer or the same distance away\n return get_num_obstacles(head_coord, self.goal_coord)\n \n else:\n # The tail is closer\n return 
get_num_obstacles(tail_coord, self.goal_coord)", "def manhattan_heuristic(state):\n man_h = 0\n size = len(state)\n for i in range (size):\n for j in range (size):\n if state[i][j] == 0:\n continue\n else:\n man_h = man_h + abs(i - int(state[i][j]/3)) + abs(j - (state[i][j])%3)\n return man_h", "def foodHeuristic(state, problem):\n\n position, foodGrid = state\n\n # *** Your Code Here ***\n if len(foodGrid.asList()) == 0: # If no food, then no need to go on\n return 0\n trackHeuristic = []\n # Manhattan dist between curr node position and all foods\n # If there is food, iterate through all available foods\n for food in foodGrid.asList():\n currentHeuristic = distance.manhattan(position, food)\n trackHeuristic.append(currentHeuristic)\n return max(trackHeuristic)", "def manhattan(self):\n distance = 0\n for i in range(3):\n for j in range(3):\n if self.plateau[i][j] != 0:\n x, y = divmod(self.plateau[i][j]-1, 3)\n distance += abs(x - i) + abs(y - j)\n return distance", "def manhattan_heuristic_cost(curr, end):\n curr_x, curr_y = curr\n end_x, end_y = end\n return abs(curr_x-end_x) + abs(curr_y-end_y)", "def __manhattan(self, x_state, y_state, x_goal, y_goal):\n distance = (abs(x_state - x_goal) + abs(y_state - y_goal))\n return distance", "def manhattan_distance(puzzle):\n\n\tcount = 0\n\tdimension = puzzle.dimension\n\tfor i in range(dimension):\n\t\tfor j in range(dimension):\n\t\t\tcorrect_row = math.floor((puzzle.board[i][j] - 1) / dimension) if puzzle.board[i][j] != 0 else dimension - 1\n\t\t\tcorrect_column = (puzzle.board[i][j] % dimension) - 1 if puzzle.board[i][j] % dimension != 0 else dimension - 1\n\n\t\t\tcount += abs(i - correct_row) + abs(j - correct_column)\n\n\treturn count", "def manhattanDistance(Ship):\n\n return abs(Ship.n - Ship.s) + abs(Ship.e - Ship.w)", "def _return_heuristic(bot, problem):\n bot_pos = bot.get_pos()\n player_pos = problem.get_player_loc()\n bot_plane_pos = (bot.x, bot.z)\n\n y_diff = bot_pos.y - player_pos.y\n\n drop = _DROP if y_diff > 0 else 1\n y_diff = abs(y_diff)\n drops = _drops(y_diff, drop)\n min_man = float('inf')\n for dir_ in _adj_dirs():\n loc = player_pos + 2 * dir_\n man_dist = _manhattan(bot_plane_pos, (loc.x, loc.z))\n if man_dist < min_man:\n min_man = man_dist\n if man_dist < drops:\n return drops\n return min_man", "def get_manhattan_distance(coord_a, coord_b):\n return abs(coord_a.x - coord_b.x) + abs(coord_a.y - coord_b.y)", "def heuristic(current, goal):\r\n # First tried manhattan distance but wasn't good enough so did direct distance which makes sense since the robot came move diagonally \r\n #return abs(current[0]-goal[0])+abs(current[1]-goal[1])\r\n return math.sqrt((current[0]-goal[0])**2+(current[1]-goal[1])**2)", "def heuristic_cal(current: list, goal: list) -> int:\n\n current_locations = state_to_locations(current)\n goal_locations = state_to_locations(goal)\n\n h_val = 0 # Tracks the cost of the heuristic function\n for i in range(1, 16):\n h_val += (abs(current_locations[i][0] - goal_locations[i][0]) +\n abs(current_locations[i][1] - goal_locations[i][1]))\n \"\"\" Loops through both lists of locations and adds the Manhattan distance \n of each number to the sum h_val. 
The range is from 1 to 16 because the \n blank in either state is not taken into account.\"\"\"\n\n return h_val", "def foodHeuristic(state, problem):\n position, foodGrid = state\n \"*** YOUR CODE HERE ***\"\n \"\"\"\n Mi heurística consiste en hacer simplemente el máximo de las distancias reales del state a cada nodo con comida\n He provado diferentes heurísticas y esta es la que me expande menos nodos, aunque no es la más óptima temporalmente\n Tardé mucho tiempo en darme cuenta de que había una función que calculaba la distancia real entre dos nodos\n NOTA: NO EJECUTAR CON LABERINTOS MÁS GRANDES QUE EL tinySearch. El algoritmo requiere muchísimo tiempo\n \"\"\"\n max = 0 # Inicializo el máximo en 0\n for food in foodGrid.asList(): # Esto me da cada food como un nodo (x,y), pero sólo los nodos que tengan comida\n distance = mazeDistance(position, food, problem.startingGameState) # Distancia real del state a una comida\n if max < distance: # Cálculo del máximo\n max = distance\n return max\n\n # La siguiente heurística también servía, y de hecho tardaba mucho menos, pero el autograder me daba 2/4\n # ya que se expandían más de 12.000 nodos.\n # return len(foodGrid.asList())", "def heuristic(state, puzzle):\n h = 0\n for i in range(puzzle.dimension):\n for j in range(puzzle.dimension):\n # (0, 0) -> 1 as value, (0, 2) -> 3 as value, etc\n value = i * puzzle.dimension + j + 1\n if value == puzzle.dimension ** 2: # value is ' '\n value = ' '\n current_position = puzzle.get_coordinates(state, value)\n goal_position = (i, j)\n h += util.manhattanDistance(current_position, goal_position)\n h /= 2\n return h", "def manhattan_distance(x, y):\n return abs(x) + abs(y)", "def test_6_walls(self):\n grid_S = MAPPGridState.create_from_string(\n [\"#.#0###\",\n \"#.#.###\",\n \".......\",\n \"###.#.#\",\n \"###.#1#\"])\n \n grid_G = MAPPGridState.create_from_string(\n [\"#.#1###\",\n \"#.#.###\",\n \".......\",\n \"###.#.#\",\n \"###0#.#\"])\n plan = astar(grid_S,\n lambda s : s == grid_G,\n MAPPDistanceSum(grid_G))\n self.assertEqual(10,sum(a.cost for a in plan))", "def astar_corner(maze):\n # TODO: Write your code here\n \"\"\"\n Plan:\n Do normal a* but then .clear visited after each new goal is found\n new h = Manhattan distance to the nearest goal and then the manhattan distance to the other goals starting from this nearest goal. 
\n new priority queue -- tuple (f, x&y, goals_left, \n \"\"\"\n pq = []\n visited = {}\n\n goals = maze.getObjectives()\n start = maze.getStart()\n\n tie = 1\n #\n # tuple = (f,g,h,x&y,tiebreaker, goals left, currpath, visited)\n f = min_manhattan(goals, start)\n curr = (f, 0, f, start, goals, 0, [])\n heapq.heappush(pq, curr)\n\n food = None\n while len(pq) > 0:\n curr = heapq.heappop(pq)\n #print(\"curr:\", curr)\n if curr[3] in curr[4]:\n curr[4].remove(curr[3])\n if len(curr[4]) == 0:\n #print(\"DONE\")\n #print(food)\n food = curr\n break\n neighbors = maze.getNeighbors(curr[3][0], curr[3][1])\n for n in neighbors:\n curr_goals_left = curr[4].copy()\n curr_visited = curr[6].copy()\n tie += 1\n #print(\"curr[6]: \", curr[6])\n #print(\"n: \", n)\n #print(\"curr[4]: \", curr[4])\n h2 = min_manhattan(curr[4], n)\n f2 = h2 + curr[1]\n g2 = curr[1] + 1\n\n node_new = (f2, g2, h2, n, curr_goals_left, tie, curr_visited)\n \n if node_new[3] not in visited or node_new[4] not in visited[node_new[3]][1]:\n if node_new[3] not in visited:\n visited[node_new[3]] = (node_new[3], [])\n visited[node_new[3]][1].append(node_new[4])\n node_new[6].append(curr[3])\n heapq.heappush(pq, node_new)\n\n if food is None:\n return []\n\n food[6].append(food[3])\n\n return food[6]", "def manhattan_distance(self):\n x, y = self.start\n other_x, other_y = self.x, self.y\n print(abs(x - other_x) + abs(y - other_y))", "def calculate_manhattan_dist(idx, value, n):\n\n idx_value = value\n config = [1,2,3,4,5,6,7,8]\n config_test = tuple(config.insert(idx, 0))\n test_puzzle_state = PuzzleState(config_test, n)\n config_goal = tuple(config.insert(idx_value, 0))\n goal_puzzle_state = PuzzleState(config_goal, n)\n\n frontier_set = Q.Queue(0)\n frontier_set.put(test_puzzle_state)\n explored_set = []\n parent_to_child_dict = {}\n \n while not frontier_set.empty():\n fringe = frontier_set.get()\n \n if fringe.config == config_goal:\n break\n else:\n discovered_nodes = []\n expanded_set = fringe.expand()\n \n for i in expanded_set:\n if i not in frontier_set and explored_set:\n discovered_nodes += [i]\n frontier_set.put(i)\n \n parent_to_child_dict[fringe] = discovered_nodes\n explored_set += [fringe]\n\n manhattan_dist = calculate_total_cost(test_puzzle_state, goal_puzzle_state, parent_to_child_dict)\n \n return manhattan_dist", "def calculate_manhattan_dist(idx, value, n):\n pass", "def manhattan_distance(origin, destination):\n return abs(destination.row - origin.row) + \\\n abs(destination.column - origin.column)", "def test_manhattan_distance(self):\n knn = Knn(n_neighbors=3)\n knn.fit(np.array(little_X), little_Y)\n d = knn._manhattan_distance(np.array([5,6]))\n assert (d == [7, 7]).all(), \"Manhattan Distance is not correct\"", "def get_manhattan_distance(node):\n result = 0\n\n for idx, val in enumerate(node):\n if idx != val:\n result += abs(idx - val)\n\n return result", "def get_manhattan_dist(row1, col1, row2, col2):\n distHoriz = abs(row1 - row2)\n distVert = abs(col1 - col2)\n dist = distHoriz + distVert\n return dist", "def manhattan_distance_between(start, destination):\n return abs(destination.x - start.x) + abs(destination.y - start.y)", "def ManhattanTest(vector_list):\n\n distance = 0\n\n for index in range(len(vector_list)):\n\n try:\n distance += distance_manhattan(vector_list[index], vector_list[index + 1])\n except IndexError:\n continue\n\n return distance", "def calculate_made_up_dist(self):\n\n # Ensure if current state equals goal, cost is only the current cost\n if self._goal_loc == 
self._current_loc:\n return self._current_cost\n\n # Distance is at least the Manhattan distance as cannot move diagonal\n estimated_distance = self.calculate_manhattan_dist()\n\n # Assume two board parts in the priority queue have the same weight.\n # For those board paths with higher actual cost and lower heuristic\n # cost, there is more assurance in the accuracy of the actual cost\n # than in the heuristic cost. Give a very small penalty (i.e. less\n # than one step) to prefer a path with a higher known cost than a\n # path with a higher heuristic cost.\n # Extract the number of portion of the move cost from the heuristic\n heuristic_cost = estimated_distance - self._current_cost\n # Heuristic cost penalty is normalized to a maximum of 0.1 steps\n # This is achieved by dividing the heuristic cost by the size of the\n # board. Since the heuristic cost can never be larger than the board\n # size, this quotient is less than or equal to 1. To normalize to a\n # maximum of 0.1, just multiply the number by 0.1. This is than added\n # to the estimated distance determined so far.\n heuristic_cost_penalty = 0.1 * heuristic_cost\n heuristic_cost_penalty /= BoardPath._traversed_board_size\n # Add what is essentially an \"uncertainty penalty\"\n estimated_distance += heuristic_cost_penalty\n\n # In case where all neighboring spaces are blocked or already\n # traversed, then set the path cost prohibitively large so it is\n # given minimum priority.\n if not (self.is_move_valid(\"d\", BoardPath._traversed_board)) \\\n and not (self.is_move_valid(\"u\", BoardPath._traversed_board)) \\\n and not (self.is_move_valid(\"l\", BoardPath._traversed_board)) \\\n and not (self.is_move_valid(\"r\", BoardPath._traversed_board)):\n # Total board area is sufficient as a prohibitive distance\n estimated_distance += BoardPath._traversed_board_size\n return estimated_distance\n\n # If all next steps that load directly to the goal are blocked, then\n # it takes at least two additional moves to get around the blocked\n # paths it (due to an obstacle or already traversed square) so add\n # two to the estimated distance to include that cost.\n if self._is_all_direct_next_moves_blocked(BoardPath._traversed_board):\n estimated_distance += 2\n\n # In a heap, if two nodes have the same cost, the object that was\n # put into the heap first in many implementations will be on top of the\n # heap. To make the algorithm more efficient, apply a slight penalty to\n # a non valid solution to ensure if an invalid solution and a valid\n # solution have the same cost that the valid solution would always be\n # on top of the heap. This is done by giving all non-valid solutions a\n # penalty term that is greater than zero and less than the minimum step\n # size (e.g. 
in this case 0 < 0.1 < 1).\n estimated_distance += 0.1\n\n # Return estimated distance\n return estimated_distance", "def heuristic(self):\r\n # 1.\r\n blacks, whites = 0, 0\r\n weights = [0 for _ in range(6)]\r\n directions = [[-1, -1], [-1, 1], [1, 1], [1, -1]]\r\n user_dir = directions[:2] if self.current_player == 'n' else directions[2:]\r\n for i in range(8):\r\n for j in range(8):\r\n blacks += 1 if self.matrix[i][j] in ['N', 'n'] else 0\r\n whites += 1 if self.matrix[i][j] in ['A', 'a'] else 0\r\n if self.matrix[i][j] == self.current_player or self.matrix[i][j] == self.current_player.upper():\r\n\r\n # numarul de piese rege\r\n if self.matrix[i][j] == self.current_player.upper():\r\n weights[1] += 7.75\r\n\r\n # numarul de piese normale\r\n else:\r\n weights[0] += 5\r\n\r\n # numarul de piese de pe baseline in functie de tipul de piesa\r\n # conform strategiilor de joc este o strategie buna sa ai cat mai multe\r\n # piesa pe baseline pentru a preveni creare de piese de tip rege ale adversarului\r\n if self.current_player in ['n', 'N']:\r\n if i == 7:\r\n weights[2] += 4\r\n elif self.current_player in ['a', 'A']:\r\n if i == 0:\r\n weights[2] += 4\r\n\r\n # numarul de piese din mijlocul tablei\r\n # la fel este o strategie buna pentru atac\r\n if 3 <= i <= 4 and 3 <= j <= 4:\r\n weights[3] += 2\r\n\r\n # numar piese vulnerabile\r\n # adica piese ce pot fi capturate de oponent la urmatoare tura\r\n for d in user_dir:\r\n\r\n vx = d[0] + i\r\n vy = d[1] + j\r\n back_x = i - d[0]\r\n back_y = j - d[1]\r\n next_x, next_y = vx + d[0], vy + d[1]\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(back_x, back_y) and self.matrix[back_x][back_y] == '.':\r\n weights[4] -= 3\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(next_x, next_y) and self.matrix[next_x][next_y] == '.':\r\n # daca elimin o piesa rege este o mutare mai buna\r\n if self.matrix[vx][vy] == self.opponent().upper():\r\n weights[5] += 10\r\n else:\r\n weights[5] += 7\r\n\r\n diff = (blacks - whites) if self.current_player == 'n' else (whites - blacks)\r\n # cand sunt mai putin piese, AI adopta o tactica mai ofensiva\r\n if blacks + whites <= 10:\r\n return sum(weights) + diff\r\n return sum(weights)", "def return_manhattan_distance(coord1, coord2):\n x1, y1 = coord1\n x2, y2 = coord2\n\n return float(abs(x2-x1) + abs(y2-y1))", "def heuristic_2(node):\n x_node, y_node = node.state.location()\n goals = node.state.grid.components.white_walkers\n distance = [np.sqrt((x_node - x)**2 + (y_node - y)**2) for x, y in goals]\n return np.sum(distance)", "def cornersHeuristic(state, problem):\n\n # Useful information.\n # corners = problem.corners # These are the corner coordinates\n # walls = problem.walls # These are the walls of the maze, as a Grid.\n\n # *** Your Code Here ***\n corners = problem.corners # These are the corner coordinates\n # walls = problem.walls # These are the walls of the maze, as a Grid.\n\n # Get unvisited corners\n successor = [False, False, False, False]\n currentPosition = state[0]\n currentStatus = state[1]\n\n # Take the manhattan distance of the nodes\n # current position and all corners tuple location\n # Iterate through all corners\n for corner in range(len(corners)):\n successor[corner] = distance.manhattan(currentPosition,\n corners[corner]) * (not currentStatus[corner]) # Ignore corners already visited\n return max(successor) # Return the max value from all 
calculated manhattan values of all corner", "def cornersHeuristic(state, problem):\n corners = problem.corners # These are the corner coordinates\n walls = problem.walls # These are the walls of the maze, as a Grid (game.py)\n distance = []\n for i in range(len(corners)):\n distance.append(fabs((corners[i][0] - state[0][0]) + (corners[i][1] - state[0][1])))\n \"*** YOUR CODE HERE ***\"\n return min(distance) # Default to trivial solution", "def distManhattan(p1,p2):\n (x1,y1)=p1\n (x2,y2)=p2\n return abs(x1-x2)+abs(y1-y2)", "def manhattenDistance(self, position, goal):\n\n\t\treturn sum(abs(a-b) for a,b in zip(position,goal))", "def manhattanDistance(loc1, loc2):\n # BEGIN_YOUR_CODE (our solution is 1 line of code, but don't worry if you deviate from this)\n return(sum(tuple(abs(i-j) for i,j in zip(loc1,loc2))))\n # END_YOUR_CODE", "def distance_score(vertex1, board, player_id): #implement preference for closer settlements\n num_buildings = 0\n total_dist = 0\n player_buildings = board.get_player_settlements(player_id) + board.get_player_cities(player_id)\n\n if len(player_buildings) == 0: #if it is our first turn\n return 0\n\n player_roads = board.get_player_roads(player_id)\n accessible_vertices = list(set(player_buildings+ [vertex for pair in player_roads for vertex in pair]))\n get_distance = lambda v: manhattan_distance(v, vertex1, board)\n min_distance = min(map(get_distance, accessible_vertices))\n\n enemy_buildings = [v for v in board.settlements if board.settlements[v] != player_id]\n enemy_roads = [r for r in board.roads if board.roads[r] != player_id]\n\n\n \"\"\"\n for s in board.settlements:\n if board.settlements[s] != player_id:\n vertex2 = s\n total_dist_enemies += manhattan_distance(vertex1, vertex2, board)\n num_buildings+=1\n\n for c in board.cities:\n if board.cities[c] != player_id:\n vertex2 = c\n total_dist_enemies += manhattan_distance(vertex1, vertex2, board)\n num_buildings+=1\n\n \"\"\"\n return min_distance", "def __h2(self): # _manhattan_distance\n h2 = 0\n\n for i in range(self.board_size):\n for j in range(self.board_size):\n if self.arr[i][j] == 0:\n continue\n h2 += (abs(i-(self.arr[i][j]//self.board_size)) +\n abs(j-(self.arr[i][j] % self.board_size)))\n\n return h2", "def evaluate_distance(self):\n\n fitness = 0\n routes = split_to_routes(self)\n\n for route in routes:\n route = [home] + route + [home]\n for i in range(1,len(route)):\n # Calculates full distance, including from last city\n # to first, to terminate the trip\n pos_from = route[i - 1]\n pos_to = route[i]\n distance = dm[pos_from][pos_to]\n fitness += distance\n\n return int(fitness)", "def heuristic(board, node_coordinate, start_coordinate, goal_coordinate):\r\n # The cost is initially just the Manhattan distance\r\n cost = manhattan_distance(node_coordinate, goal_coordinate)\r\n\r\n # If a node is adjacent to a gate on the CircuitBoard its cost is\r\n # drastically increased. 
This excludes the start and goal gates.\r\n adjacents = board.get_adjacent_coordinates(node_coordinate, cube=True)\r\n if goal_coordinate in adjacents:\r\n adjacents.remove(goal_coordinate)\r\n if start_coordinate in adjacents:\r\n adjacents.remove(start_coordinate)\r\n if any(adjacent in adjacents for adjacent in board.gate_coordinates):\r\n cost += 1000\r\n\r\n # 3/4 of amount of non-empty spots in the node's layer is added to its cost\r\n # to discourage overcrowding single layers.\r\n nonzeros = np.count_nonzero(board.board[node_coordinate[0]])\r\n cost += nonzeros * 0.75\r\n\r\n # To encourage moving upwards, each layer has a weight which decreases the\r\n # cost with a factor of the weight.\r\n height_weights = [1, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3]\r\n cost *= height_weights[node_coordinate[0]]\r\n return cost", "def spotlessroomba_first_heuristic(state : SpotlessRoombaState) -> float:\n # TODO a nontrivial admissible heuristic\n return len(state.dirty_locations)", "def calc_manhattan(self, p_object):\n total = sum([self.manhattan(self[num], p_object[num]) for num in sorted(self.num_to_pos)[1:]])\n return total", "def heuristic(cell, goal):\n return math.hypot(goal.x - cell.x, goal.y - cell.y)", "def manhattanDistance(loc1, loc2):\n # BEGIN_YOUR_ANSWER (our solution is 1 lines of code, but don't worry if you deviate from this)\n return sum([abs(loc2[i]-l1) for i, l1 in enumerate(loc1)])\n # END_YOUR_ANSWER", "def manhattan_distance(x, y):\n return sum(abs(a - b) for a, b in zip(x, y))", "def heuristic(current, goal):\r\n distance = getDistance(current, goal)\r\n return distance", "def manhattan_dist(c1, c2):\n return abs(c1[0] - c2[0]) + abs(c1[1] - c2[1]) + abs(c1[2] - c2[2])", "def second_heuristic(self):\r\n directions = [[-1, -1], [-1, 1], [1, 1], [1, -1]]\r\n # aceasta matrice indica valoarea pe care o are mutarea unei piese pe o celula aleasa\r\n # se va aduna la media ponderilor adunate in lista weights\r\n\r\n # mijlocul tablei este punctul cel mai vulnerabil\r\n # in timp ce lateralele sunt sigure,iar linia bazei transforma piesa in rege\r\n\r\n points = [[0, 4, 0, 4, 0, 4, 0, 4],\r\n [4, 0, 3, 0, 3, 0, 3, 0],\r\n [0, 3, 0, 2, 0, 2, 0, 4],\r\n [4, 0, 2, 0, 1, 0, 3, 0],\r\n [0, 3, 0, 1, 0, 2, 0, 4],\r\n [4, 0, 2, 0, 1, 0, 3, 0],\r\n [0, 3, 0, 2, 0, 2, 0, 4],\r\n [4, 0, 4, 0, 4, 0, 4, 0]]\r\n\r\n weights = [0 for i in range(4)]\r\n whites, blacks = 0, 0\r\n for i in range(8):\r\n for j in range(8):\r\n\r\n # numaram discurile de fiecare culoarea\r\n blacks += 1 if self.matrix[i][j] in ['N', 'n'] else 0\r\n whites += 1 if self.matrix[i][j] in ['A', 'a'] else 0\r\n\r\n if self.matrix[i][j] in [self.current_player, self.current_player.upper()]:\r\n\r\n # daca e piesa normala\r\n if self.matrix[i][j] == self.current_player:\r\n weights[0] += 4\r\n\r\n # cat de aproape este piesa de a deveni rege ( nr de linii din tabla - cate mai are pana ajunge pe ultima linie)\r\n\r\n # cu cat se apropie piesa mai multe de a deveni rege, scorul creste( negru - rege pentru i=0, alb -rege pentru i =7)\r\n if self.matrix[i][j] == 'n':\r\n weights[1] += (7 - i)\r\n elif self.matrix[i][j] == 'a':\r\n weights[1] += i\r\n else:\r\n # daca e piesa rege\r\n weights[0] += 8\r\n\r\n # cat de aproape este piesa rege de celelalte piese\r\n for d in directions:\r\n if self.matrix[i][j] == self.current_player.upper():\r\n # gaseste pe diagonala in directia d, o piesa adversara,daca exista\r\n x, y = self.find_piesa(i, j, d)\r\n if x and y:\r\n weights[2] += (x - i) * (x - i) + (y - j) * (y - j)\r\n vx = d[0] + 
i\r\n vy = d[1] + j\r\n back_x = i - d[0]\r\n back_y = j - d[1]\r\n next_x, next_y = vx + d[0], vy + d[1]\r\n # piesele pe care le poate captura jucatorul, daca e piesa rege are un scor mai mare\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(next_x, next_y) and self.matrix[next_x][next_y] == '.':\r\n if self.matrix[next_x][next_y] == self.opponent().upper():\r\n weights[3] += 7\r\n else:\r\n weights[3] += 4\r\n # piese care pot fi capturate; la fel daca este piesa rege atunci se scade mai mult scorul\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(back_x, back_y) and self.matrix[back_x][back_y] == '.':\r\n if self.matrix[vx][vy] == self.opponent().upper():\r\n weights[3] -= 6\r\n else:\r\n weights[3] -= 3\r\n # adunam piesa la media sumei date pentru a face AI-ul in caz de egalitate a scorului\r\n # sa imi aleaga piesa care ma pozitioneaza mai bine\r\n if self.move:\r\n return sum(weights) / 4 + points[self.move[0]][self.move[1]]\r\n return sum(weights) / 4\r\n\r\n def __str__(self):\r\n s = ' '\r\n for i in range(8):\r\n s += str(i) + ' '\r\n s += '\\n'\r\n for index, line in enumerate(self.matrix):\r\n s += str(chr(index + ord('a'))) + ' '\r\n for el in line:\r\n s += str(el) + ' '\r\n s += '\\n'\r\n\r\n return s", "def manhattan_distance(user1: User, user2: User) -> float:\r\n common_animes = set.intersection(set(user1.neighbor_anime.keys()),\r\n set(user2.neighbor_anime.keys()))\r\n return sum(abs(anime.neighbor_users[user1] - anime.neighbor_users[user2])\r\n for anime in common_animes)", "def SimpleHeuristic(self, gameState, myPos, goal):\n return self.getMazeDistance(myPos, goal)", "def astar_multi(maze):\n graph_ = Graph(maze.getObjectives())\n\n pq = []\n visited = {}\n\n goals = maze.getObjectives()\n start = maze.getStart()\n\n tie = 1\n #\n # tuple = (f,g,h,x&y,tiebreaker, goals left, currpath, visited)\n # h = min_manhattan(goals, start)\n h = mst_heur(start, goals, graph_)\n\n curr = (h, 0, h, start, goals, 0, [])\n heapq.heappush(pq, curr)\n\n food = None\n while len(pq) > 0:\n curr = heapq.heappop(pq)\n # print(\"curr:\", curr)\n if curr[3] in curr[4]:\n curr[4].remove(curr[3])\n if len(curr[4]) == 0:\n # print(\"DONE\")\n # print(food)\n food = curr\n break\n neighbors = maze.getNeighbors(curr[3][0], curr[3][1])\n for n in neighbors:\n curr_goals_left = curr[4].copy()\n curr_visited = curr[6].copy()\n tie += 1\n\n # print(\"curr[6]: \", curr[6])\n # print(\"n: \", n)\n # print\n\n # h2 = min_manhattan(curr[4], n)\n h2 = mst_heur(n, curr[4], graph_)\n f2 = h2 + curr[1]\n g2 = curr[1] + 1\n\n node_new = (f2, g2, h2, n, curr_goals_left, tie, curr_visited)\n\n if node_new[3] not in visited or node_new[4] not in visited[node_new[3]][1]:\n if node_new[3] not in visited:\n visited[node_new[3]] = (node_new[3], [])\n visited[node_new[3]][1].append(node_new[4])\n node_new[6].append(curr[3])\n heapq.heappush(pq, node_new)\n\n if food is None:\n return []\n\n food[6].append(food[3])\n\n return food[6]", "def astar(maze):\n # TODO: Write your code here\\\n\n start = maze.getStart()\n end = maze.getObjectives()[0] # 0 needed so it's not the list it's the end spot\n\n pq = [] # priority queue - filled with tuple of f, x&y, g(path distance from start)\n heapq.heappush(pq, (manhattan_distance(start, end), start, 0))\n\n visited = set()\n map_ = {}\n solvable = True\n at_collectible = None\n\n while len(pq) > 0:\n\n curr = heapq.heappop(pq)\n curr_pos 
= curr[1]\n\n if curr_pos == end:\n at_collectible = curr\n break\n\n neighbors = maze.getNeighbors(curr_pos[0], curr_pos[1])\n\n for n in neighbors:\n\n new_curr = (manhattan_distance(n, end) + curr[2] + 1, (n[0], n[1]), curr[2] + 1)\n\n if n not in visited and maze.isValidMove(n[0], n[1]):\n map_[new_curr] = curr\n heapq.heappush(pq, new_curr)\n visited.add(n)\n\n curr = at_collectible\n path = []\n while curr[1] != start:\n path.append(curr[1])\n curr = map_[curr]\n path.append(curr[1])\n path.reverse()\n\n return path", "def manhatam_distance(self) -> int:\n raise NotImplementedError", "def h(self,node):\n \"*** YOUR CODE HERE ***\"\n dist_arr = []\n for goal in self.goals:\n dist_arr.append(manhattan_distance_with_heading(node.state, goal))\n return min(dist_arr)", "def test_7_medium(self):\n grid_S = MAPPGridState.create_from_string(\n [\"...#.........\",\n \"...#.........\",\n \"...#.........\",\n \"...########..\",\n \"..12......34.\",\n \"...###..###..\",\n \"...######....\",\n \"........#....\",\n \"........#....\"])\n \n grid_G = MAPPGridState.create_from_string(\n [\"...#.........\",\n \"...#.........\",\n \"...#.........\",\n \"...########..\",\n \"..34......21.\",\n \"...###..###..\",\n \"...######....\",\n \"........#....\",\n \"........#....\"])\n plan = astar(grid_S,\n lambda s : s == grid_G,\n MAPPDistanceSum(grid_G))\n self.assertEqual(36,sum(a.cost for a in plan))", "def heuristic(current, goal):\r\n\r\n return Vector2.fromCell(current).distanceTo(Vector2.fromCell(goal))", "def cornersHeuristic(state, problem):\n corners = problem.corners # These are the corner coordinates\n walls = problem.walls # These are the walls of the maze, as a Grid (game.py)\n \"*** YOUR CODE HERE ***\"\n \"\"\"\n En este ejercicio me he dado cuenta de un problema de mi definición del espacio de estados:\n - El espacio de estados consiste en tuplas ((x,y), grid), donde (x,y) es la posición en coordenadas\n y grid es la tabla de true/false.\n - El problema es que yo he pensado la tabla grid en forma de matriz matemática, de manera que los índices\n no van de acuerdo con la posición de las esquinas, sinó con los índices de una matriz.\n Para solucionar este problema sin tener que modificar todo lo anterior (dado que no me queda tiempo) lo que he\n tenido que hacer es crear una lista y añadir de forma ordenada los valores true/false, para que se corresponda\n cada uno con su esquina.\n \n Mi heurística consiste en lo siguiente:\n * Calculo la distancia desde la posición en la que me sitúo hasta todos los corners no visitados (los que aún\n tienen comida) y me quedo con la mínima de estas distancias, y con el corner que me de esa mínima.\n * Calculo la distancia desde ese corner (el mínimo de antes) hasta todos los otros posibles corners no visitados\n y de nuevo me quedo con la mínima distancia y con el corner que me da esa mínima.\n * Repito este proceso hasta que no queden corners.\n Entonces lo que hago es definir una nueva lista de corners, newListOfCorners que irá extrayendo los corners a medida\n que su distanca sea calculada. Por ejemplo, si tengo los cuatro corners con comida y estoy en una posición \n aleatoria, la lista newListOfCorners estará llena. Se calculará la distancia a cada corner y el corner que de la \n mínima será extraído de newListOfCorners. Entonces se calculará la distancia desde este corner hasta los restantes\n tres corners de newListOfCorners y el corner de esos tres que me de la mínima será extraído de la lista. 
Etc...\n \"\"\"\n\n # Ordenamos la lista de True's y False's para que vaya acorde con el orden de la lista corners:\n visitedCorners = []\n visitedCorners.append(state[1][1][0])\n visitedCorners.append(state[1][0][0])\n visitedCorners.append(state[1][1][1])\n visitedCorners.append(state[1][0][1])\n corners = list(corners) # De aquí saco una lista que contenga los corners ordenados.\n # Ahora los corners y la lista de visitedCorners contendrán la información de forma ordenada y coherente\n minimum = 9999999999999999 # Defino un mínimo muy grande para asegurarme que nunca sea superado\n total = 0 # Inicializo el total a cero\n newListOfCorners = [] # Creo una nueva lista para añadir los corners no estudiados\n for corner in corners: # Primero vamos a llenar la lista de corners con los que me interesen: los que tienen comida\n if visitedCorners[corners.index(corner)]: # Miramos que el corner tenga comida, sino pasamos\n newListOfCorners.append(corner) # Si tiene comida, lo añadimos\n minimCorner = corners[0] # Inicializo el minimCorner a un corner aleatorio para que no me de problemas más tarde\n actualState = state[0] # Lo mismo\n\n while not len(newListOfCorners) == 0: # Mientras la lista no esté vacía...\n for corner in newListOfCorners: # Cogemos un corner de la lista\n distanceToCorner = manhattanHeuristicToCorners(actualState, corner) # Calculamos dist. a corner\n if distanceToCorner < minimum: # Calculamos el mínimo\n minimum = distanceToCorner\n minimCorner = corner\n total += minimum # Y lo añadimos al total\n actualState = minimCorner # Reactualizamos cada variable para volver a empezar el bucle\n minimum = 9999999999999999999999999999999\n newListOfCorners.remove(minimCorner)\n return total", "def __cost_to_goal(self, goal_state):\n cost = 0\n for i in range(len(goal_state) * len(goal_state[0])):\n if(i != 0):\n pos_goal = self.__get_position(i, goal_state)\n pos_current = self.__get_position(i, self.puzzle)\n cost += self.__manhattan(pos_current[0], pos_current[1], pos_goal[0], pos_goal[1])\n return cost", "def calc_heuristic(self, state):\n h = 0\n board = state.board.array\n\n for i in range(self._n):\n for j in range(self._n):\n\n if board[i][j] != space_rep:\n tile_as_number = board[i][j]\n correct_x = (tile_as_number - 1) // self._n\n correct_y = (tile_as_number - 1) % self._n\n else:\n continue\n h += calc_diffs(i, j, correct_x, correct_y)\n return h", "def utility(state:State,maximizing_player):\n best_move_score = -1\n #######################[Goal]#########################\n is_current_player_stuck = is_stuck(state,state.player_type)\n other_player = RIVAL if state.player_type == PLAYER else PLAYER\n # Check if stuck\n if is_current_player_stuck:\n if state.player_type == PLAYER:\n state.players_score[state.player_type] -= state.penalty_score\n else:\n state.players_score[state.player_type] += state.penalty_score\n return state.players_score[state.player_type] - state.players_score[other_player] \n ######################################################\n # Else\n #--------------------------------------------------\n ################# Available Steps #################\n #--------------------------------------------------\n player_available_steps = availables(state.board, state.locations[PLAYER])\n h1 = 4-player_available_steps\n h4 = player_available_steps\n #--------------------------------------------------\n ################# Fruits Distance #################\n #--------------------------------------------------\n h2 = -1\n if state.fruits_ttl > 0 and 
len(state.fruits_dict) > 0:\n min_fruit_dist = float('inf')\n for fruit_loc in state.fruits_dict:\n curr_fruit_dist = Manhattan(state.locations[state.player_type], fruit_loc)\n # Check what is the closest fruit reachable\n if curr_fruit_dist < min_fruit_dist and curr_fruit_dist <= state.fruits_ttl:\n other_player_fruit_dist = Manhattan(state.locations[other_player], fruit_loc)\n if curr_fruit_dist < other_player_fruit_dist:\n min_fruit_dist = curr_fruit_dist\n max_dist = len(state.board)+len(state.board[0])\n h2 = (max_dist*10.0/min_fruit_dist)+1 if min_fruit_dist < float('inf') else -1\n #--------------------------------------------------\n ################# Reachable Squrs #################\n #--------------------------------------------------\n reachables_player = reachables(state.board,state.locations[PLAYER])\n reachables_rival = reachables(state.board,state.locations[RIVAL])\n h3 = reachables_player - reachables_rival # We want more for us\n #--------------------------------------------------\n ################# Combine it all. #################\n #--------------------------------------------------\n if not state.half_game():\n w = 0.8 if h2 > 0 else 1\n best_move_score = w*(h1-h3) + (1-w)*h2 \n else:\n w = 0.7 if h2 > 0 else 1\n best_move_score = w*(h4+h3) + (1-w)*h2 \n\n best_move_score += state.players_score[state.player_type]\n return best_move_score", "def calculate_manhattan(node_a, node_b):\n return (abs(node_a.x - node_b.x) + abs(node_a.y - node_b.y))", "def shortest_manhattan_distance(coordinates):\n current_minimum = sys.maxsize\n\n for x, y in coordinates:\n if abs(x) + abs(y) < current_minimum:\n current_minimum = abs(x) + abs(y)\n\n return current_minimum", "def random_points_ascending_hillclimber(house, all_houses, waters, total_value_map):\n total_value_map_NEW = total_value_map\n\n # check in welke range het huis geplaats kan worden, niet kijkend naar water of andere \n rangex = MAXIMUM_WIDTH - house.width\n rangey = MAXIMUM_HEIGHT - house.length\n\n for x in range(100):\n # maak random x en y coördinaat\n randomizex = rangex * random()\n randomizey = rangey * random()\n\n # bewaar oude locaties\n tempx = house.bottom_left[0]\n tempy = house.bottom_left[1]\n \n # verander locatie\n bottom_left = (randomizex,randomizey)\n house.location(bottom_left)\n\n # als je je huis op nieuwe locatie kan plaatsen\n if place_house(house, all_houses, waters) == True:\n # bereken nieuw waarde map, waarin huis is verplaatst\n total_value_map_temp = 0\n for item in all_houses.values():\n for house in item:\n house.extra_meters()\n total_value_map_temp += house.totalprice()\n\n # als waarde met nieuwe locatie hoger is, verander deze\n if total_value_map_NEW < total_value_map_temp:\n total_value_map_NEW = total_value_map_temp\n # als waarde niet hoger is verander naar oude locatie en bereken weer totale waarde map\n else:\n bottom_left = (tempx,tempy)\n house.location(bottom_left)\n if place_house(house, all_houses, waters) == True:\n for item in all_houses.values():\n for houses in item:\n houses.extra_meters()\n houses.totalprice()\n # als huis niet geplaats kan worden, verander naar oude locatie en bereken weer totale waarde map\n else:\n bottom_left = (tempx,tempy)\n house.location(bottom_left)\n if place_house(house, all_houses, waters) == True:\n for item in all_houses.values():\n for houses in item:\n houses.extra_meters()\n houses.totalprice()\n\n return all_houses, total_value_map_NEW", "def get_cost_of_actions_helper(actions, board, starting_point):\n total_cost = 0\n for action 
in actions:\n cost = action.piece.get_num_tiles()\n total_cost += cost\n return total_cost", "def getMove(self, grid):\n# global prune\n# prune = 0\n def Terminal(stateTup):\n \"\"\"\n Checks if the node is a terminal node\n Returns eval(state) if it is terminal\n \"\"\"\n state = stateTup[0]\n maxDepth = self.depthLimit\n if stateTup[1] == maxDepth:\n val = self.h.get(str(state.map))\n if val == None:\n Val = Eval(state)\n self.h[str(state.map)] = Val\n return Val\n else:\n return val\n elif len(stateTup[0].getAvailableMoves()) == 0:\n val = self.h.get(str(state.map))\n if val == None:\n Val = Eval(state)\n self.h[str(state.map)] = Val\n return Val\n else:\n return val\n\n def Eval(state):\n \"\"\"\n This is the eval function which combines many heuristics and assigns\n weights to each of them\n Returns a single value\n \"\"\"\n\n# H1 = htest2(state)\n# return H1\n H2 = h1(state)*monotonic(state)\n return H2\n\n\n def h1(state):\n Max = state.getMaxTile()\n left = len(state.getAvailableCells())/16\n if state.getCellValue([0,0]) == Max:\n v = 1\n else:\n v= 0.3\n Max = Max/1024\n return Max*left*v\n\n def mono(state):\n mon = 0\n# for i in range(4):\n# row = 0\n# for j in range(3):\n# if state.map[i][j] > state.map[i][j+1]:\n# row+=1\n# if row == 4:\n# mon += 1\n# for i in range(4):\n# column = 0\n# for j in range(3):\n# if state.map[j][i] > state.map[j+1][i]:\n# column +=1\n# if column == 4:\n# mon +=1\n#\n#\n# return mon/8\n for i in range(4):\n if all(earlier >= later for earlier, later in zip(grid.map[i], grid.map[i][1:])):\n mon+=1\n\n return mon/8\n\n def monotonic(state):\n cellvals = {}\n Path1 = [(3,0),(3,1),(3,2),(3,3),(2,3),(2,2),(2,1),(2,0),\n (1,0),(1,1),(1,2),(1,3),(0,3),(0,2),(0,1),(0,0)]\n for i in Path1:\n cellvals[i] = state.getCellValue(i)\n mon = 0\n for i in range(4):\n if cellvals.get((i,0)) >= cellvals.get((i,1)):\n if cellvals.get((i,1)) >= cellvals.get((i,2)):\n if cellvals.get((i,2)) >= cellvals.get((i,3)):\n mon +=1\n for j in range(4):\n if cellvals.get((0,j)) >= cellvals.get((1,j)):\n if cellvals.get((1,j)) >= cellvals.get((2,j)):\n if cellvals.get((2,j)) >= cellvals.get((3,j)):\n mon+=1\n return mon/8\n\n\n\n def htest2(state):\n score1 = 0\n score2 = 0\n r = 0.5\n\n Path1 = [(3,0),(3,1),(3,2),(3,3),(2,3),(2,2),(2,1),(2,0),\n (1,0),(1,1),(1,2),(1,3),(0,3),(0,2),(0,1),(0,0)]\n Path2 = [(3,0),(2,0),(1,0),(0,0),(0,1),(1,1),(2,1),(3,1),\n (3,2),(2,2),(1,2),(0,2),(0,3),(1,3),(2,3),(3,3)]\n valDict = {}\n for n in range(16):\n valDict[Path1[n]] = state.getCellValue(Path1[n])\n for n in range(16):\n if n%3 == 0:\n self.emergency()\n cell1 = valDict.get(Path1[n])\n cell2 = valDict.get(Path2[n])\n score1 += (cell1) * (r**n)\n score2 += (cell2) * (r**n)\n return max(score1,score2)\n\n\n def Maximize(stateTup,A,B):\n \"\"\"\n Returns a tuple of state,eval(state)\n Takes in a stateTup(tuple of grid + depth of the grid), alpha,\n and beta\n \"\"\"\n self.emergency()\n t = Terminal(stateTup)\n if t != None:\n return (None, t)\n\n maxChild , maxUtility = None,-999999999\n state = stateTup[0]\n Map = self.dict.get(str(state.map))\n if Map == None:\n children = []\n for M in range(4):\n g = state.clone()\n if g.move(M):\n children.append(g)\n self.dict[str(state.map)] = children\n else:\n children = Map\n for child in children:\n childTup = (child,stateTup[1]+1)\n utility = Minimize(childTup,A,B)[1]\n if utility > maxUtility:\n maxChild , maxUtility = child , utility\n if maxUtility >= B:\n# global prune\n# prune +=1\n break\n if maxUtility > A:\n A = maxUtility\n\n return 
(maxChild,maxUtility)\n\n\n def Minimize(stateTup,A,B):\n \"\"\"\n Returns a tuple of state,eval(state)\n Takes in a stateTup(tuple of grid + depth of the grid), alpha,\n and beta\n \"\"\"\n self.emergency()\n t = Terminal(stateTup)\n if t != None:\n return (None, t)\n\n minChild , minUtility = None,999999999\n state = stateTup[0]\n Map= self.dict.get(str(state.map))\n if Map == None:\n cells= state.getAvailableCells()\n children = []\n tiles = [2,4]\n for i in cells:\n for j in tiles:\n g = state.clone()\n g.insertTile(i,j)\n children.append(g)\n self.dict[str(state.map)] = children\n else:\n children = Map\n for child in children:\n childTup = (child,stateTup[1]+1)\n utility = Maximize(childTup,A,B)[1]\n if utility < minUtility:\n minChild , minUtility = child , utility\n if minUtility <= A:\n# global prune\n# prune +=1\n break\n if minUtility < B:\n B = minUtility\n\n return (minChild,minUtility)\n\n\n\n def decision(grid):\n \"\"\"\n Decision function which returns the move which led to the state\n \"\"\"\n child = Maximize((grid,0),-999999999,999999999)[0]\n Child = child.map\n g = grid.clone()\n for M in range(4):\n if g.move(M):\n if g.map == Child:\n # global prune\n # global pruneLog\n # pruneLog.append(prune)\n # print(prune)\n # print(sum(pruneLog)/len(pruneLog))\n return M\n g = grid.clone()\n\n self.dict = {}\n self.h = {}\n self.prevTime = time.clock()\n self.depthLimit = 1\n self.mL = []\n self.over = False\n while self.over == False:\n self.depthLimit +=1\n try :\n self.mL.append(decision(grid))\n\n except KeyError:\n# print(self.depthLimit)\n return self.mL[-1]\n except IndexError:\n return random.randint(0,3)\n self.Alarm(time.clock())\n return self.mL[-1]", "def get_distance(self, heuristic=\"\"):\n # If no heuristic is specified, used the default\n if(heuristic == \"\"):\n heuristic = BoardPath._heuristic\n\n if(heuristic == \"manhattan\"):\n return self.calculate_manhattan_dist()\n elif(heuristic == \"euclidean\"):\n return self.calculate_euclidean_dist()\n elif(heuristic == \"made_up\"):\n return self.calculate_made_up_dist()\n else:\n sys.exit()", "def heuristic_1(node):\n x_node, y_node = node.state.location()\n goals = node.state.grid.components.white_walkers\n goals.append(node.state.grid.components.dragon_stone)\n distance = [np.sqrt((x_node - x)**2 + (y_node - y)**2) for x, y in goals]\n return distance[np.argmin(distance)]", "def ant_colony(map, alpha=3, beta=4, m=10, rho=0.2, q=1, its_max=20):\n n = len(map)\n tau = np.ones((n, n))\n eta = 1/map.D\n for i in range(n):\n eta[i, i] = 0\n paths_array = np.zeros((m, n), int)\n its = 0\n path_best = np.zeros((its_max, n), int)\n distance_best = np.zeros(its_max)\n\n while its < its_max:\n paths_length = np.zeros(m)\n for i in range(m):\n source = np.random.randint(n)\n visited = []\n unvisited = list(range(n))\n node_now = source\n node_next = -1\n paths_array[i, 0] = source\n\n for j in range(1, n):\n visited.append(node_now)\n unvisited.remove(node_now)\n prob_roulette = np.array([0]*n, dtype=float)\n for k in unvisited:\n prob_roulette[k] = (pow(tau[node_now, k], alpha)\n * pow(eta[node_now, k], beta))\n prob_roulette = prob_roulette/sum(prob_roulette)\n cum_roulette = prob_roulette.cumsum()\n cum_roulette -= np.random.uniform(0, 1)\n node_next = list(cum_roulette >= 0).index(True)\n paths_array[i, j] = node_next\n paths_length[i] += map.D[node_now, node_next]\n node_now = node_next\n paths_length[i] += map.D[node_now, source]\n\n if its == 0:\n distance_best[its] = paths_length.min()\n path_best[its] = 
paths_array[paths_length.argmin()].copy()\n else:\n if distance_best[its-1] < paths_length.min():\n distance_best[its] = distance_best[its-1]\n path_best[its] = path_best[its-1].copy()\n else:\n distance_best[its] = paths_length.min()\n path_best[its] = paths_array[paths_length.argmin()].copy()\n\n add_tau = np.zeros((n, n))\n\n for i in range(m):\n for j in range(n):\n row = paths_array[i, j]\n col = paths_array[i, (j+1) % n]\n add_tau[row][col] += q/paths_length[i]\n\n tau = (1 - rho)*tau + add_tau\n\n its += 1\n\n return Hamiltonian(path_best[-1], map)", "def calculate_move_fast_reward(self, packet):\r\n return get_distance_location(packet.gamecars[self.index].Location, self.previous_car_location)", "def examineMaze(self, gameState):\n w = self.walls.width\n h = self.walls.height\n walls = self.walls.deepCopy()\n food1 = self.getFoodYouAreDefending(gameState)\n food2 = self.getFood(gameState)\n\n # Save map as 0, 1, 2 and 3 (0:walls, 1:spaces, 2:babies, 3:food)\n for x in range(w):\n for y in range(h):\n if walls[x][y]:\n walls[x][y] = 0\n elif food1[x][y]:\n walls[x][y] = 2\n elif food2[x][y]:\n walls[x][y] = 2\n else:\n walls[x][y] = 1\n\n roomsDisplay = []\n # Detect doors and spaces. Spaces are now negative\n for x in range(w):\n for y in range(h):\n if walls[x][y] > 0:\n exitsNum = 0\n if walls[x][y - 1] != 0:\n exitsNum += 1\n if walls[x][y + 1] != 0:\n exitsNum += 1\n if walls[x - 1][y] != 0:\n exitsNum += 1\n if walls[x + 1][y] != 0:\n exitsNum += 1\n if exitsNum == 1 or exitsNum == 2:\n walls[x][y] = -1 * walls[x][y]\n roomsDisplay.append((x, y))\n elif exitsNum == 0:\n # We erase unaccessible cells\n walls[x][y] = 0\n else:\n # These are doors or big rooms, we leave them positive\n pass\n\n # Create roomsGraph: every room has a number, some cells and some doors\n roomsGraph = []\n doorsGraph = []\n for x in range(1, w - 1):\n for y in range(1, h - 1):\n if walls[x][y] < 0:\n spacesNum = 0\n if walls[x][y - 1] < 0:\n spacesNum += 1\n if walls[x][y + 1] < 0:\n spacesNum += 1\n if walls[x - 1][y] < 0:\n spacesNum += 1\n if walls[x + 1][y] < 0:\n spacesNum += 1\n if spacesNum < 2:\n endOfPath = False\n graphNode = {\"path\": [], \"doors\": [], \"food\": 0, \"isBig\": False}\n auxx = x\n auxy = y\n while not endOfPath:\n graphNode[\"path\"].append((x, y))\n graphNode[\"food\"] += -walls[x][y] - 1\n walls[x][y] = 0\n xx = x\n yy = y\n if walls[x][y - 1] < 0:\n yy = y - 1\n elif walls[x][y + 1] < 0:\n yy = y + 1\n elif walls[x - 1][y] < 0:\n xx = x - 1\n elif walls[x + 1][y] < 0:\n xx = x + 1\n else:\n endOfPath = True\n if walls[x][y - 1] > 0:\n if [(x, y - 1), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x, y - 1), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x, y - 1), []]))\n if walls[x][y + 1] > 0:\n if [(x, y + 1), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x, y + 1), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x, y + 1), []]))\n if walls[x - 1][y] > 0:\n if [(x - 1, y), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x - 1, y), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x - 1, y), []]))\n if walls[x + 1][y] > 0:\n if [(x + 1, y), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x + 1, y), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x + 1, y), []]))\n x = xx\n y = yy\n roomsGraph.append(graphNode)\n x = auxx\n y = auxy\n\n # 
Create doorsGraph: every door has a number, and goes to other rooms or other doors\n for j, door in enumerate(doorsGraph):\n for i, room in enumerate(roomsGraph):\n for aDoor in room[\"doors\"]:\n if aDoor == j:\n doorsGraph[j][1] = doorsGraph[j][1] + [i]\n (x, y) = doorsGraph[j][0]\n adjacentCells = [(x+1, y), (x-1, y), (x, y+1), (x, y-1)]\n adjacentDoors = []\n # Check adjacent doors and add them to the current door (door structure is [pos, adjRooms, adjDoors]\n for p in adjacentCells:\n # Skip if wall\n if self.walls[p[0]][p[1]]:\n continue\n # Skip if door\n isRoom = False\n for room in doorsGraph[j][1]:\n if p in roomsGraph[room][\"path\"]:\n isRoom = True\n break\n if not isRoom:\n # Add if existing door\n doorFound = False\n for i, neighborDoor in enumerate(doorsGraph):\n if neighborDoor[0] == p:\n adjacentDoors.append(i)\n doorFound = True\n break\n # Create if non existing door and add\n if not doorFound:\n adjacentDoors.append(len(doorsGraph))\n doorsGraph.append([p, []])\n doorsGraph[j].append(adjacentDoors)\n\n # Create doorsDistance: maps what doors can be accessed from other doors\n roomsMapper = {}\n doorsMapper = {}\n isRoom = util.Counter()\n for i, door in enumerate(doorsGraph):\n doorsMapper[door[0]] = i\n isRoom[door[0]] = 0\n for i, room in enumerate(roomsGraph):\n for p in room[\"path\"]:\n roomsMapper[p] = i\n isRoom[p] = 1\n\n # Create self variables\n self.doorsGraph = doorsGraph\n self.roomsGraph = roomsGraph\n self.roomsMapper = roomsMapper\n self.doorsMapper = doorsMapper\n self.isRoom = isRoom\n\n # # Find dead ends (rooms with only one door)\n # deadRooms = {}\n # deadDoors = {}\n # # deaderDoors = {}\n # # deaderRooms = {}\n # for i, room in enumerate(roomsGraph):\n # if len(room[\"doors\"]) == 1:\n # deadRooms[i] = room[\"doors\"][0]\n # deadDoors[room[\"doors\"][0]] = 1\n # numdR = 0\n # aliveR = -1\n # for adjRoom in doorsGraph[room[\"doors\"][0]][1]:\n # if adjRoom not in deadRooms:\n # numdR += 1\n # aliveR = adjRoom\n # if numdR + len(doorsGraph[room[\"doors\"][0]][2]) == 1:\n # if aliveR >= 0:\n # deaderRooms[aliveR] = room[\"doors\"][0]\n # for adjDoor in roomsGraph[aliveR][\"doors\"]:\n # if adjDoor == room[\"doors\"][0]:\n # continue\n # deaderDoors[adjDoor] = 1.0\n # else:\n # deaderDoors[doorsGraph[room[\"doors\"][0]][2][0]] = 1.0\n\n\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # for r in deaderRooms:\n # for p in roomsGraph[r][\"path\"]:\n # roomsCounter[0][p] = 0.4\n # for d in deaderDoors:\n # roomsCounter[1][doorsGraph[d][0]] = 0.4\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n\n # print deadDoors\n # print\n # print deadRooms\n # print \"-----------------\"\n\n # deadEndsChanged = True\n # while deadEndsChanged:\n # deadEndsChanged = False\n # for i, room in enumerate(roomsGraph):\n # numAliveDoors = 0\n # aliveDoor = 0\n # deadDoor = []\n # for door in room[\"doors\"]:\n # if door not in deadDoors:\n # numAliveDoors += 1\n # aliveDoor = door\n # else:\n # deadDoor.append(door)\n # if numAliveDoors == 1:\n # aliveRoom = 0\n # aliveNeighborDoor = 0\n # for door in deadDoor:\n # # aliveNeighborDoor += len(doorsGraph[door][2])\n # for neighborRoom in doorsGraph[door][1]:\n # if neighborRoom not in deadRooms:\n # aliveRoom += 1\n # if aliveRoom == len(deadDoor):\n # deadRooms[i] = aliveDoor\n # deadDoors[aliveDoor] = 1\n # deadEndsChanged = True\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # 
roomsCounter[0][doorsGraph[aliveDoor][0]] = 1\n # for p in room[\"path\"]:\n # roomsCounter[1][p] = 1\n # for p in deadDoor:\n # roomsCounter[2][doorsGraph[p][0]] = 1\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n\n # Find dead ends (rooms with doors that only go to other dead ends, except one)\n # Danger, it is theoretically possible to have a map only with dead ends, which may make this crash\n # deadEndsChanged = True\n # while deadEndsChanged:\n # deadEndsChanged = False\n # for i, door in enumerate(doorsGraph):\n # if i not in deadDoors:\n # numOpenRooms = 0\n # openRoom = 0\n # for j, room in enumerate(door[1]):\n # if room not in deadRooms:\n # numOpenRooms += 1\n # openRoom = j\n # if numOpenRooms + len(door[2]) == 1:\n # for room in door[1]:\n # if room != openRoom:\n # deadRooms[room] = i\n # deadDoors[j] = 1\n # deadEndsChanged = True\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # roomsCounter[0][door[0]] = 1\n # for rr in door[1]:\n # print rr\n # if rr in deadRooms:\n # for p in roomsGraph[rr][\"path\"]:\n # roomsCounter[1][p] = 1\n # else:\n # for p in roomsGraph[rr][\"path\"]:\n # roomsCounter[3][p] = 1\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n # print deadDoors\n # print\n # print deadRooms\n # print \"-----------------\"\n\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # roomsCounter[1][(6, 9)] = 0.4\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # for r in deadRooms:\n # for p in roomsGraph[r][\"path\"]:\n # roomsCounter[0][p] = 0.4\n # for d in deadDoors:\n # roomsCounter[1][doorsGraph[d][0]] = 0.4\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n # Show every room\n roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n for room in roomsGraph:\n for p in room[\"path\"]:\n if len(room[\"doors\"]) > 1:\n roomsCounter[0][p] = 0.4\n else:\n roomsCounter[2][p] = 0.4\n # Show every door\n for door in doorsGraph:\n roomsCounter[1][door[0]] = 0.4\n # Display rooms and doors (red: rooms with at least one exit; orange: rooms with 1 exit; blue: doors\n self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")", "def _get_heuristic(self, game):\r\n board = game._get_bord()\r\n player = game._current_player\r\n size = game._size\r\n\r\n # [1] The more pawns one has compared to the number of pawns\r\n # the opponent has, the better.\r\n\r\n count_delta = self._get_fields_delta(board, player)\r\n\r\n # [2] The further advanced a pawn, the better.\r\n # Free paths are great.\r\n\r\n adv_board = deepcopy(board)\r\n usr_now_blocked = [False] * size\r\n opp_now_blocked = [False] * size\r\n\r\n # Traversal of board backwards for performance reasons.\r\n # (free paths flags)\r\n # Of course this could also be done by flipping calculation of\r\n # the row indices. 
But that seems counterintuitive.\r\n for r in range(size - 1, -1, -1):\r\n for c in range(size):\r\n # Row indices for both perspectives.\r\n # We will be travelling the board from both ends\r\n # at the same time.\r\n r_opp = r\r\n r_usr = size - 1 - r\r\n\r\n # Perspective of Player.USER.\r\n if board[r_usr][c] == Player.OPP:\r\n # If this field is occupied by the Player.OPP\r\n # and since we are travelling the board from the final row\r\n # a pawn of the Player.USER can reach,\r\n # we can set a flag to remember, that this col is now\r\n # blocked for all Player.USER's pawns less advanced.\r\n usr_now_blocked[c] = True\r\n elif board[r_usr][c] == Player.USER:\r\n # Evaluate the position of the Player.USER's pawn:\r\n # - the further advanced (given as value in r_usr),\r\n # the better.\r\n # - if the column ahead is free from Player.OPP's pawns,\r\n # gets a bonus.\r\n # To prevent each pawn from taking 2 fields as a first\r\n # step, subtracted 1 from value.\r\n adv_board[r_usr][c] *= (r_usr - 1) * (r_usr - 1)\r\n\r\n if not usr_now_blocked[c]:\r\n adv_board[r_usr][c] *= 10 # TODO: choose best weight\r\n\r\n # Perspective of Player.OPP.\r\n if board[r_opp][c] == Player.USER:\r\n # If this field is occupied by the Player.USER\r\n # and since we are travelling the board from the final row\r\n # a pawn of the Player.OPP can reach,\r\n # we can set a flag to remember, that this col is now\r\n # blocked for all Player.OPP's pawns less advanced.\r\n opp_now_blocked[c] = True\r\n elif board[r_opp][c] == Player.OPP:\r\n # Evaluate the position of the Player.USER's pawn:\r\n # - the further advanced (given as value in r_usr),\r\n # the better.\r\n # - if the column ahead is free from Player.OPP's pawns,\r\n # gets a bonus.\r\n # To prevent each pawn from taking 2 fields as a first\r\n # step, subtracted 1 from value.\r\n adv_board[r_opp][c] *= (r_opp - 1) * (r_opp - 1)\r\n\r\n if not opp_now_blocked[c]:\r\n adv_board[r_opp][c] *= 10 # TODO: choose best weight\r\n\r\n adv_delta = self._get_fields_delta(adv_board, player)\r\n\r\n # We refrain from adjusting weights of both aspects. 
Could be\r\n # optimized by collecting data.\r\n return adv_delta + count_delta", "def ManhattanDistance(point1, point2):\n\n x1 = point1.x\n x2 = point2.x\n y1 = point1.y\n y2 = point2.y\n\n manhattandistance = np.abs(x1 - x2) + np.abs(y1 - y2)\n\n return manhattandistance", "def astar(maze):\n # TODO: Write your code here\n q = queue.PriorityQueue()\n q.put((1, maze.getStart(), 0))\n traversed = []\n path = []\n tracker = {maze.getStart(): None} #Tracker needs to contain tuples\n\n while q:\n curr_loc = q.get()\n\n if curr_loc[1] not in traversed: #Add to traversed points list\n traversed.append(curr_loc[1])\n\n if maze.isObjective(curr_loc[1][0], curr_loc[1][1]):\n finished = curr_loc[1]\n break\n\n nextpath = maze.getNeighbors(curr_loc[1][0], curr_loc[1][1]) #Search neighbor points\n for point in nextpath:\n if point not in traversed and maze.isValidMove(point[0], point[1]):\n q.put((manhattan_distance(point, maze) + curr_loc[2] + 1, point, curr_loc[2] + 1))\n tracker[point] = curr_loc[1]\n\n while finished:\n path.insert(0, finished)\n finished = tracker[finished]\n\n return path", "def astar(grid, heuristic):\r\n grid.clearVisited()\r\n grid.clearPath()\r\n last = None\r\n queue = PriorityQueue()\r\n\r\n init = grid.getStart()\r\n goals = grid.getGoals()\r\n if len(goals) == 0:\r\n return\r\n goal = goals[0]\r\n if goal is None or init is None:\r\n return\r\n\r\n # (totalDistance, node, previous, startDistance)\r\n queue.put((0, init, None, heuristic(init, goal)))\r\n done = False\r\n while not done:\r\n if queue.empty():\r\n done = True\r\n else:\r\n current = queue.get()\r\n if current[1] == goal:\r\n last = current\r\n done = True\r\n else:\r\n visited = grid.getVisited()\r\n if not current[1] in visited:\r\n currentSuccessors = grid.getNeighbors(current[1])\r\n for successor in currentSuccessors:\r\n coordinate = successor[0]\r\n if not coordinate in visited:\r\n startDistance = current[3] + successor[1]\r\n queue.put((startDistance + heuristic(coordinate, goal), coordinate, current, startDistance))\r\n grid.addVisited(current[1])\r\n\r\n # Reconstruct the path.\r\n if last:\r\n current = last\r\n path = []\r\n while current:\r\n path += [current[1]]\r\n current = current[2]\r\n path = list(reversed(path))\r\n grid.setPath(path)", "def heuristic(self):\n game_score = (self.get_game_score(), 0.85)\n road_score = (self.get_longest_road_score(), 0.05)\n steps_score = (self.get_steps_available_score(), 0.05)\n reachable_nodes_score = (self.get_reachable_nodes_score(), 0.05)\n heuristics = [game_score, road_score, steps_score, reachable_nodes_score]\n result = 0\n for score, weight in heuristics:\n result += score * weight\n if DEBUG_PRINT:\n print(f\"Heuristic value for location {self.loc} is {result}\")\n print(f\"\\treachable score: {reachable_nodes_score[0] * reachable_nodes_score[1]}\")\n print(f\"\\tsteps score: {steps_score[0] * steps_score[1]}\")\n print(f\"\\tlongest road score: {road_score[0] * road_score[1]}\")\n print(f\"\\tgame score: {game_score[0] * game_score[1]}\")\n return result", "def minMoves(maze, x, y):\n\n def maze_guard():\n \"\"\"Guard function to block oversized dimensions\"\"\"\n cell_guard = all([1 <= len(row) <= 100 for row in maze])\n row_guard = 1 <= len(maze) <= 100\n return cell_guard and row_guard\n\n def walk_maze(finish):\n \"\"\"Walks the maze, finding the shortest path including all coins.\n Finishes when reach the coordenate finish, a tuple with row and\n column numbers\n \"\"\"\n i, j = (0, 0)\n result = -1\n weight = -1\n while nodes:\n i, j, 
path, coins = nodes.popleft()\n cell = maze[i][j]\n if (i, j) == finish:\n weight, result = check_result(coins, path, weight, result)\n elif cell != 1:\n adjacent_nodes(i, j, path, coins)\n\n return result\n\n def adjacent_nodes(i, j, path, coins):\n \"\"\"Adds the node in positions i, j, with its path added to\n accumulated path. The path is transformed into a binary\n number, i.e, 2 ** (i * n + j), being n the number of rows\n in the maze matrix.\n \"\"\"\n def neighbour(x, y):\n this_path = 2 ** (i * n + j)\n if not this_path & path:\n coin = coins + 1 if maze[i][j] == 2 else coins\n nodes.append((x, y, path + this_path, coin))\n\n coord = [(i + 1, j, i + 1 < n), (i - 1, j, i - 1 >= 0),\n (i, j + 1, j + 1 < m), (i, j - 1, j - 1 >= 0)]\n _ = [neighbour(x, y) for x, y, test in coord if test]\n\n if not maze_guard():\n return -1\n\n n = len(maze)\n m = len(maze[0])\n nodes = deque([(0, 0, 0, 0)])\n return walk_maze((x, y))", "def heuristic(self, node):\n res = 0\n np_node = np.array(node)\n for i in range(4):\n for j in range(4):\n tmp = self.goal[i][j]\n if tmp != 0:\n point = np.where(np_node == tmp)\n x = int(point[0])\n y = int(point[1])\n distance = np.sqrt(np.square(x - i) + np.square(y - j))\n res += distance\n return res", "def hillclimber_algorithm(iterations, water_layout, max_houses, ts, neighbourhood=None, score = None, mode=None): \n \n ################################ start by creating a random neighbourhood ###################\n \n # standard neighbourhood distribution of the houses\n amount_sfh, amount_bungalow, amount_maison = max_houses*0.6, max_houses*0.25, max_houses*0.15\n\n if mode == \"greedy\":\n file_name = \"Hillclimber-greedy\"\n elif mode == \"bestrandom\":\n file_name = \"Hillclimber-bestrandom\"\n else:\n file_name = \"Hillclimber-random\"\n\n # create table\n table = []\n if neighbourhood == None:\n # create neighbourhood, place water and build houses, collect neighbourhood and score\n neighbourhood = []\n neighbourhood = waterbuilder(water_layout, neighbourhood)\n neighbourhood, score = housebuilder(max_houses, amount_maison, amount_bungalow, amount_sfh, neighbourhood)\n\n ################################ now iterate using the hill climber method ####################\n\n # for loop through iterations\n for i in range(iterations):\n\n # create a deepcopy of the current neighbourhood layout\n temp_neighbourhood = deepcopy(neighbourhood)\n\n # choose a random house\n random_house = rd.choice([h for h in temp_neighbourhood if h.name != \"WATER\"])\n temp_neighbourhood.remove(random_house)\n \n # get house type and id\n type_house = random_house.type\n ID = random_house.id\n\n # make house with same id and type\n house = House(type_house,str(ID))\n if location_checker(house, temp_neighbourhood) == False:\n while location_checker(house, temp_neighbourhood) == False:\n house = House(type_house, i)\n \n temp_neighbourhood.append(house)\n\n # calculate new shortest_distances\n temp_neighbourhood = distance_check(temp_neighbourhood)\n\n # now calculate the score of this new neighbourhood\n new_score = scorecalculator(temp_neighbourhood)\n\n # compare the score of the old neighbourhood to the new one, choose the best one\n if new_score > score:\n neighbourhood = deepcopy(temp_neighbourhood)\n score = new_score\n\n # save progress in table\n table.append([i, max_houses, score, new_score])\n\n # save results in dataframe\n df_hillclimber = pd.DataFrame(table, columns = [\"iteration\", \"max_houses\", \"old_score\", \"new_score\"])\n \n # make a visualisation of the best 
score and save it\n create_map(neighbourhood, score, file_name, ts, str(file_name+\"_map-\"+str(max_houses)))\n \n # create a plot of the progress\n performanceplot(file_name, iterations, max_houses, ts, df_hillclimber.iteration, df_hillclimber.old_score)\n\n return neighbourhood, score", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n # Useful information you can extract from a GameState (pacman.py)\n newPos = currentGameState.getPacmanPosition()\n newFood = currentGameState.getFood()\n newGhostStates = currentGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n \n \n \"*** YOUR CODE HERE ***\"\n #the number of food in successorGameState\n numFood = newFood.count()\n #print([newGhostStates[i].configuration.pos for i in range(len(newGhostStates))])\n #the effect of the distance between pacman and normal ghost, scared ghost\n minPacGhost = newFood.height + newFood.width\n eatScaredChance_Max = 0\n eatableGhost = None\n for i in range(len(newGhostStates)):\n pacGhostDis = manhattanDistance(newPos,newGhostStates[i].configuration.pos)\n eatScaredChance = max([0,newGhostStates[i].scaredTimer-pacGhostDis])\n if pacGhostDis < minPacGhost and newGhostStates[i].scaredTimer==0:\n minPacGhost = pacGhostDis\n if eatScaredChance > eatScaredChance_Max:\n eatScaredChance_Max= eatScaredChance\n eatableGhost = newGhostStates[i].configuration.pos\n \n #print((eatScaredChance_Max,newPos,eatableGhost))\n #the secure distance is 3, after both pacman and ghost make move, pacman is still safe\n #find the most dangerous distance\n pac_Ghost_Distance = min([minPacGhost,3])\n #find the closet food to pacman\n minDistance = newFood.height * newFood.width\n manhattan_PriorityQueue = util.PriorityQueue()\n for y in range(newFood.height):\n for x in range(newFood.width):\n if newFood[x][y] == True:\n manhattan_PriorityQueue.push((x,y),manhattanDistance(newPos,(x,y)))\n for i in range(5):\n if manhattan_PriorityQueue.isEmpty():\n break\n else:\n maze = mazeDistance(newPos,manhattan_PriorityQueue.pop(),currentGameState)\n if maze < minDistance:\n minDistance = maze\n #calculate the evaluation value\n evaluation = currentGameState.getScore() + 200 * eatScaredChance_Max + 500 * pac_Ghost_Distance + (1/(minDistance))\\\n / max([numFood,1])\n #print(evaluation)\n return evaluation", "def manhattan_dist(idx, pos, n):\n\n row_dist = abs(pos // n - idx // n)\n col_dist = abs(pos % n - idx % n)\n return row_dist + col_dist", "def door_matt_cost(clusters, cluster, sp_cache):\n # checks shortest path table, finds all path larger than 3,\n # then the cluster between will be used as door matt.\n dm_sum = 0\n for path in sp_cache.values():\n if (len(path) > 2) and (cluster.label in path[1:-1]):\n src = _find_cluster(clusters, path[0])\n dest = _find_cluster(clusters, path[-1])\n dm_sum += _c2c_cost(src, dest)\n return int(dm_sum)", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n def getClosest(locs, pos):\n closest = 100000\n closestPos = ()\n totalDist = 0\n for loc in locs:\n dist = manhattanDistance(pos, loc)\n totalDist += dist\n if dist < closest:\n closest = dist\n closestPos = loc\n return (closest, closestPos, totalDist)\n\n #gamestate stuff\n currPos = currentGameState.getPacmanPosition()\n Food = currentGameState.getFood()\n GhostStates = currentGameState.getGhostStates()\n ScaredTimes = [ghostState.scaredTimer for ghostState in GhostStates]\n Capsules = currentGameState.getCapsules()\n value = 
currentGameState.getScore()\n\n #find out stuff about the pellets remaining\n foodPellets = Food.asList()\n numPellets = len(foodPellets)\n closestFood, closestFoodPos, totalFoodDist = getClosest(foodPellets, currPos)\n\n #find closest ghost\n closestGhostDist = 1000000\n closestGhost = ()\n for ghost in GhostStates:\n distToGhost = manhattanDistance(currPos, ghost.getPosition())\n if distToGhost < closestGhostDist:\n closestGhostDist = distToGhost\n closestGhost = ghost\n\n #find capsules to make ghosts scared, looking at pacman actions I'm not sure if this works or only the \n #scared timer part decides if a capsule gets eaten\n closestCapsuleDist, closestCapsule, totalCapsuleDist = getClosest(Capsules, currPos)\n if currPos in Capsules:\n value += 25\n if closestCapsuleDist == 1:\n value += 1\n\n \n #if ghost is scared prioritize eating them\n if closestGhost.scaredTimer != 0:\n if closestGhostDist > 0:\n value += closestGhost.scaredTimer / closestGhostDist\n if closestGhostDist == 0:\n value += 1000\n\n #avoid getting caught by ghosts\n if closestGhostDist == 1 and closestGhost.scaredTimer == 0:\n value -= 100\n\n #incentivize eating pellets if ghosts aren't scared and avoid getting stuck trying to choose\n if totalFoodDist > 0 and closestGhost.scaredTimer == 0:\n value -= totalFoodDist / numPellets\n value += 1 / closestFood\n\n return value", "def calculateCosts(self):\n self.costs = 0\n for house in self.houses:\n if not house.distance == 1000:\n self.costs += house.distance * 9\n for battery in self.batteries:\n self.costs += battery.costs\n return self.costs", "def registerInitialState(self, gameState):\n '''\n Make sure you do not delete the following line. If you would like to\n use Manhattan distances instead of maze distances in order to save\n on initialization time, please take a look at\n CaptureAgent.registerInitialState in captureAgents.py.\n '''\n self.startpos=gameState.getAgentPosition(self.index)\n CaptureAgent.registerInitialState(self, gameState)\n self.midwidth = gameState.data.layout.width / 2\n self.carryfoods = 0\n self.foodnum = len(self.getFood(gameState).asList())\n self.foods = self.getFood(gameState).asList()\n self.hisdefendfoods = self.getFoodYouAreDefending(gameState).asList()\n self.height = gameState.data.layout.height\n self.hispos = None\n initmap = InitMap(self,gameState)\n self.safefoodlist,self.dangerfoodlist = initmap.gainlist()\n self.deadends = initmap.gaindeadends() \n self.indanger = False\n '''\n Your initialization code goes here, if you need any.\n '''" ]
[ "0.7025016", "0.7011789", "0.6967061", "0.6955104", "0.69499224", "0.69179034", "0.6900574", "0.6883391", "0.68710554", "0.677832", "0.65591604", "0.6524315", "0.65080816", "0.65063334", "0.6491734", "0.6422433", "0.6416312", "0.6414841", "0.63929284", "0.63912034", "0.63885194", "0.63284945", "0.63283485", "0.62270075", "0.6205155", "0.6180276", "0.6146897", "0.61161745", "0.6088711", "0.6062633", "0.6036935", "0.60350174", "0.602797", "0.60142535", "0.59755796", "0.5971103", "0.5958929", "0.593404", "0.5926649", "0.5902148", "0.5893331", "0.5892021", "0.58802557", "0.5871248", "0.5829682", "0.5801298", "0.5782132", "0.57588667", "0.5758356", "0.5736402", "0.5720947", "0.57131386", "0.5709494", "0.5694248", "0.5690472", "0.56731987", "0.5660393", "0.5654944", "0.56520563", "0.5643828", "0.56135654", "0.56015164", "0.55668885", "0.5560494", "0.5557144", "0.55560845", "0.554437", "0.5538855", "0.5537809", "0.5526894", "0.552166", "0.5516684", "0.55088615", "0.55013394", "0.55007005", "0.54891264", "0.5473817", "0.54522043", "0.5450367", "0.54392236", "0.5429859", "0.54289585", "0.542674", "0.542281", "0.5420686", "0.54139525", "0.5392704", "0.5389186", "0.53377926", "0.53284526", "0.5327117", "0.5324322", "0.5316312", "0.53078496", "0.5306852", "0.5299444", "0.5285317", "0.52845263", "0.5277667", "0.5277519" ]
0.66257834
10
Create a new description tag. Descriptions are always required, never hidden, and never limited.
def __init__(self, *args): super().__init__('description', *args, required=True, hidden=False, limit=-1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_descr(self, attr_name):", "def description(self, newDescription=None):\n pass", "def add_description(self, description):\n self.add_metric('descript', description)", "def add_description(self, desc):\n self.description = desc", "def description(self, description: str):\n return self.swag({\n 'description': normalize_indent(description),\n })", "def with_description(self, description):\r\n self.description = description\r\n return self", "def description(self, description):\n\n self._set_field(\"description\", description)", "def description(self, description) :\n\t\ttry :\n\t\t\tself._description = description\n\t\texcept Exception as e:\n\t\t\traise e", "def define_description(self):\n self._description = 'NODDI-based processing of DWI datasets.'", "def description(self, value):\n self._update_values('description', value)", "def description():", "def add_oopsy_form_new_description():\n form = AddOopsyForm({'description': 'Add new', 'new_description': 'New description', 'points': 3})\n return form", "def description(self, description):\n self._description = description", "def description(self, description):\n self._description = description", "def description(self, description):\n self._description = description", "def description(self, description):\n self._description = description", "def _set_description(\n meta: Dict, description: Optional[Union[str, bool]] = None, **kwargs\n) -> Dict:\n if description is False or description is None:\n show_description_value = MetaWidget.DESCRIPTION_OPTION_NOTHING\n description = \"\"\n elif isinstance(description, str):\n show_description_value = MetaWidget.DESCRIPTION_OPTION_CUSTOM\n else:\n raise IllegalArgumentError(\n \"When using the add_card_widget or add_service_card_widget, 'description' must be \"\n \"'text_type' or None or False. 
Type is: {}\".format(type(description))\n )\n meta.update(\n {\n MetaWidget.SHOW_DESCRIPTION_VALUE: show_description_value,\n MetaWidget.CUSTOM_DESCRIPTION: description,\n }\n )\n return meta", "def set_description(self, description):\n self.description = description", "def description(self, description):\n \n if description is not None and len(description) > 128: \n raise ValueError(\"Invalid value for `description`, length must be less than `128`\")\n\n self._description = description", "def set_description(self, description):\r\n self.__description = description", "def set_desc(self, item_desc):\r\n self.description = item_desc", "def description(self, value):\n if len(value):\n self._description = value\n self._description = self._wrap_line(value, self._width)\n\n # Add a blank line\n self._description.append('')", "def set_description(self):\n if 'description' not in self.data:\n if self.verbose:\n click.echo('Adding empty descriptions to root')\n self.data['description'] = ''", "def add_oopsy_form_set_description():\n form = AddOopsyForm({'description': 'Left mess', 'new_description': '', 'points': 3})\n return form", "def description(self, description):\n if description is not None and len(description) > 255:\n raise ValueError(\"Invalid value for `description`, length must be less than or equal to `255`\")\n\n self._description = description", "def description(self, value):\n self.definition.description = value", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def 
description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n\n self._description = description", "def description(self, description):\n if description is not None and len(description) > 512:\n raise ValueError(\"Invalid value for `description`, length must be less than or equal to `512`\") # noqa: E501\n if description is not None and len(description) < 0:\n raise ValueError(\"Invalid value for `description`, length must be greater than or equal to `0`\") # noqa: E501\n if (description is not None and not re.search(r'^[\\s\\S]*$', description)): # noqa: E501\n raise ValueError(r\"Invalid value for `description`, must be a follow pattern or equal to `/^[\\s\\S]*$/`\") # noqa: E501\n\n self._description = description", "def description(self, new_description):\r\n self.set({\"description\": new_description})", "def test_description_markdown_with_custom_options() -> None:\n soup = generate_case(\n \"description_markdown\",\n GenerationConfiguration(\n markdown_options={\n \"cuddled-lists\": True,\n }\n ),\n )\n\n assert (\n str(soup.find(\"span\", class_=\"description\"))\n == \"\"\"<span class=\"description\"><p>DOC </p> <ul> <li>List 1</li> <li>List 2</li> </ul> </span>\"\"\"\n )", "def set_description(self, description):\n self.__description = description", "def set_description(self, data):\n self._description = self._uni(data)", "def description(self, description: str):\n\n self._description = description", "def description(self, description: str):\n\n self._description = description", "def description(self, description: str):\n\n self._description = description", "def description(self, description: str):\n\n self._description = description", "def description(self, new_description):\n self.set_description(new_description, self._xml)\n self._description = self._read_description(self._xml)", "def BoostDesc_create(desc=None, use_scale_orientation=None, scale_factor=None): # real signature unknown; restored from __doc__\n pass", "def set_description(self, description):\n self._description = description", "def set_description(self, sNewDescription):\n\t\tcall_sdk_function('PrlVmDev_SetDescription', self.handle, sNewDescription)", "def set_description(self, desc: str) -> None:\n self.metadata.data[\"description\"] = desc", "def setDescription(self, value):\n return self.getDbRecord().setColumnValue(DESCRIPTION_COLUMN, value)", "def description(self, description):\n if description is None:\n raise ValueError(\"Invalid value for `description`, must not be `None`\")\n\n self._description = description", "def _description(self):\n return None", "def testDescription(self):\n dis_meta = DiseaseMeta()\n\n self.util.stringTypeTest(self, dis_meta, \"description\")\n\n self.util.stringPropertyTest(self, dis_meta, 
\"description\")", "def test_deprecated_not_in_description() -> None:\n soup = generate_case(\"deprecated\", GenerationConfiguration(deprecated_from_description=False))\n\n tests.html_schema_doc_asserts.assert_deprecated(soup, [False] * 5)", "def get_description():\n desc = {\"description\": __doc__, \"data\": True, \"cache\": 600}\n today = datetime.date.today()\n desc[\"arguments\"] = [\n dict(\n type=\"csector\",\n name=\"csector\",\n default=\"IA\",\n label=\"Select state/sector to plot\",\n ),\n dict(\n type=\"date\",\n name=\"sdate\",\n default=f\"{today.year}/01/01\",\n label=\"Start Date:\",\n min=\"2000/01/04\",\n max=today.strftime(\"%Y/%m/%d\"),\n ),\n dict(\n type=\"date\",\n name=\"edate\",\n default=today.strftime(\"%Y/%m/%d\"),\n label=\"End Date:\",\n min=\"2000/01/04\",\n max=today.strftime(\"%Y/%m/%d\"),\n ),\n dict(\n type=\"select\",\n name=\"d\",\n default=\"0\",\n options=PDICT,\n label=\"Select Drought Classification (at and above counted):\",\n ),\n dict(\n type=\"select\",\n name=\"w\",\n default=\"percent\",\n options=PDICT2,\n label=\"How to express time for plot:\",\n ),\n dict(type=\"cmap\", name=\"cmap\", default=\"plasma\", label=\"Color Ramp:\"),\n ]\n return desc", "def description(self, description: ConfigNodePropertyString):\n\n self._description = description", "def description(self, newDescription=None):\n if newDescription != None:\n self._setValue('description', newDescription)\n return self._getValue('description')", "def get_description(self, request_value):\n self.write(request_value['description'], 1)\n self.write('')", "def EventContentMissionExcelAddDescription(builder, Description):\n return AddDescription(builder, Description)", "def description(self):", "def get_description():\n desc = dict()\n desc[\"cache\"] = 3600\n desc[\"data\"] = True\n desc[\n \"description\"\n ] = \"\"\"This plot is not meant for interactive use, but a backend for\n SPS plots.\n \"\"\"\n desc[\"arguments\"] = [\n dict(\n type=\"text\",\n name=\"pid\",\n default=\"202012300005-KDVN-WWUS83-SPSDVN\",\n label=\"IEM generated up to 35 char product identifier:\",\n ),\n dict(\n type=\"int\",\n default=0,\n name=\"segnum\",\n label=\"Product Segment Number (starts at 0):\",\n ),\n ]\n return desc", "def description(self):\n pass", "def description(self):\n pass", "def add_AdDescription(self, url, description):\n layout = BoxLayout(size_hint_y = 4)\n layout.add_widget(AsyncImage(source = url))\n layout.add_widget(Label(text = description))\n self.layout.add_widget(layout)", "def request_description_update():\n global should_update_description\n should_update_description = True", "def description(self, item):\n assert isinstance(item, self.kind), f\"Got '{type(item).__name__}' instead of '{self.kind.__name__}'\"\n assert str(item) == self.name\n\n for name, value in self.specific_attributes:\n setattr(item, name, value)\n\n self._description = item", "def Description(self) -> str:", "def Description(self) -> str:", "def set_description(self, sDescription):\n\t\tcall_sdk_function('PrlVirtNet_SetDescription', self.handle, sDescription)" ]
[ "0.695261", "0.69262964", "0.67537904", "0.6662091", "0.6635096", "0.6542747", "0.64898026", "0.64358616", "0.64276004", "0.64130193", "0.6404517", "0.63842374", "0.63729984", "0.63729984", "0.63729984", "0.63729984", "0.6369401", "0.635684", "0.63517755", "0.63242537", "0.6318103", "0.6314391", "0.63112664", "0.6296133", "0.62793326", "0.62782156", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.62455535", "0.6241383", "0.6234365", "0.6229615", "0.62289184", "0.62263155", "0.6204098", "0.6204098", "0.6204098", "0.6204098", "0.6200559", "0.61708355", "0.6163287", "0.6149529", "0.61331195", "0.61310554", "0.6127634", "0.60885787", "0.6084494", "0.60742813", "0.60630816", "0.6057849", "0.60540104", "0.6046441", "0.60425323", "0.6029952", "0.60139644", "0.5991203", "0.5991203", "0.598918", "0.5978343", "0.59781367", "0.5964673", "0.5964673", "0.5960099" ]
0.65792376
5
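The flattened __init__ one-liner in the document field of the row above only makes sense against a tag base class that accepts required, hidden, and limit keyword arguments. Below is a minimal runnable sketch of that pattern; the Tag base class and its attribute names are assumptions added for illustration and are not part of the original row.

# Hypothetical Tag base class, assumed only for illustration; the dataset row
# shows just the Description subclass's __init__ call.
class Tag:
    def __init__(self, name, *values, required=False, hidden=False, limit=1):
        self.name = name            # tag name, e.g. 'description'
        self.values = list(values)  # values attached to the tag
        self.required = required    # tag must always be present
        self.hidden = hidden        # tag is hidden from normal output
        self.limit = limit          # max number of values; -1 means unlimited


class Description(Tag):
    """Description tags are always required, never hidden, and never limited."""

    def __init__(self, *args):
        super().__init__('description', *args,
                         required=True, hidden=False, limit=-1)


if __name__ == "__main__":
    d = Description("First line", "Second line")
    print(d.name, d.required, d.hidden, d.limit)  # description True False -1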
Generate the header string for this description. If the description is empty, return an empty string. Otherwise, the raw data is joined together and returned with no '' components.
def to_header(self): if not self.filled: return '' return "\n".join(self.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _header_string( self, title='title' ): \n return_str = ''\n return_str += '{}\\n\\n'.format( title )\n return_str += '{} atoms\\n'.format( len(self.atoms) )\n if len(self.bonds) != 0:\n return_str += '{} bonds\\n\\n'.format( len(self.bonds) )\n return_str += '{} atom types\\n'.format( len(self.atom_types ) )\n if len(self.bond_types) != 0:\n return_str += '{} bond types\\n\\n'.format( len(self.bond_types ) )\n return_str += '\\n'\n return return_str", "def generate_header(self, header=None):\n if header is None:\n header = self.header\n\n lines = [self.PREFIX_HEAD + '!b']\n for k, v in header.items():\n if k in ('labels', 'categories'):\n v = ', '.join(v)\n elif k == 'draft':\n v = repr(v)\n lines.append(self.HEADER_FMT % (k, v))\n lines.append(self.PREFIX_END)\n return '\\n'.join([_f for _f in lines if _f]) + '\\n'", "def headerstring(self):\n sss = 'IVO LEGEND:\\n'\n sss += ' Created from 152 or 155\\n'\n sss += ' Pct number\\n'\n sss += ' Found in 152 (Y/N)\\n'\n sss += ' Found in 155 (Y/N)\\n'\n sss += ' Ivo serial number\\n'\n sss += ' PEB used for opening\\n'\n sss += ' Opening date/time\\n'\n sss += ' Date/time of first vote\\n'\n sss += ' PEB used for closing\\n'\n sss += ' Closing date/time\\n'\n sss += ' Date/time of last vote\\n'\n sss += ' Number of vote events 152\\n'\n sss += ' Number of vote events 155\\n'\n sss += ' Number of vote events 155 by precinct\\n'\n sss += ' Number of late vote events 152\\n'\n sss += ' Pct numbers\\n'\n sss += ' Ballot styles\\n'\n sss += ' Memory collection times\\n'\n return sss", "def construct_header(self): \n \n # create the individual labels\n hdr_bits = [hb.format(hdr) for hb, hdr in zip(self.row_base, self.headers)]\n \n # stick it all together and return with hdr_sep underneath\n hdr_str = f\"|{'|'.join(hdr_bits)}|\\n\"\n return hdr_str + self.hdr_sep * (len(hdr_str)-1) + \"\\n\"", "def header_text(self):\n return os.linesep.join(map(str, self.headers))", "def header(self):\n return encode_as_str([self.unsealed_header(), self.seal_data], sep='`')", "def header(self):\n return encode_as_str([self.unsealed_header(), self.seal_data], sep='`')", "def header(self) -> str:\n value = self.kind\n if self.options:\n value += '; ' + '; '.join(f'{k}={v}' for k, v in self.options.items())\n return value", "def table_header(self):\n title = 'HYPERPARAMETER FINE-TUNING RESULTS'\n title_len = len(title)\n extra_spaces = self.max_length - title_len\n left_spaces = extra_spaces // 2\n right_spaces = extra_spaces - left_spaces - 1\n\n return '| ' + (left_spaces * ' ') + title + (right_spaces * ' ') + ' |\\n'", "def header( self ):\n\t\treturn '; '.join( [ '='.join(i) for i in self.items() ] )", "def BuildHeaderString (text):\r\n\r\n return t.BuildHeaderString (text)", "def buildheader(self):\n \n lines = {}\n for k in self._d:\n lines[self._d[k]]='# %d %s'%(self._d[k],k.upper())\n #sort the new keys\n nkeys= lines.keys()\n nkeys.sort()\n #join them together with newlines\n ans = ''\n for k in nkeys:\n ans=ans+\"%s\\n\"%lines[k]\n return ans", "def description(self):\n return self._hdr", "def description(self):\n return self._hdr", "def first_header():\n return \"\"\"\n<th>Target\n<th>Date\n<th colspan=\"2\">UT\n<th>Exp\n<th>Cycle\n<th>No. 
of\n<th>Filters\n<th>XxY\n<th>Speed\n<th>NX1xNY1\n<th>X1\n<th>Y1\n<th>NX2xNY2\n<th>X2\n<th>Y2\n<th>Grat.\n<th>Slit\n<th>Slit\n<th>ID\n<th>PI\n<th align=\"left\">Comment\n\"\"\"", "def unsealed_header(self):\n return encode_as_str([self.height, self.timestamp, self.target, self.parent_hash, self.is_genesis, self.merkle], sep='`')", "def unsealed_header(self):\n return encode_as_str([self.height, self.timestamp, self.target, self.parent_hash, self.is_genesis, self.merkle], sep='`')", "def __str__(self):\n header_string = ''\n for key, value in self.define.items():\n header_string += '#define {} {}\\n'.format(key, self.format(value))\n return header_string", "def createHeaderRecord(self):\n\n # ascii-character limit for every header record information (in bytes)\n lenVersion = 8\n lenLocalPatientID = 80\n lenLocalRecordingID = 80\n lenStartDate = 8\n lenStartTime = 8\n lennBytesHeader = 8\n lenEDFPlus = 44\n lennDataRecord = 8\n lenDurationDataRecord = 8\n lennSignals = 4\n \n HeaderInfolist = [self.Version, self.LocalPatientID, self.LocalRecordingID, self.StartDate, self.StartTime, self.nBytesHeader, self.EDFPlus,\\\n self.nDataRecord, self.DurationDataRecord, self.nSignals]\n lenHeaderInfo = [lenVersion, lenLocalPatientID, lenLocalRecordingID, lenStartDate, lenStartTime, lennBytesHeader, lenEDFPlus, lennDataRecord,\\\n lenDurationDataRecord, lennSignals]\n\n for i in range(len(HeaderInfolist)):\n maxlen = lenHeaderInfo[i]\n if len(HeaderInfolist[i]) > maxlen:\n # truncates the string if length is greater than limit\n HeaderInfolist[i] = HeaderInfolist[i][:maxlen] \n \n else:\n HeaderInfolist[i] = HeaderInfolist[i].ljust(maxlen)\n \n # converts the list to a string with no separator in between elements\n self.HeaderRecord = ''.join(HeaderInfolist) \n\n # concatenates each BioSignal TechInfo to the Header Record string\n for i in range(len(self.BioSignals[0].TechInfo)):\n for x in range(len(self.BioSignals)):\n self.HeaderRecord = self.HeaderRecord + self.BioSignals[x].TechInfo[i]", "def get_header():\n title = \"\"\"\n ___ __\n | o _|_ _|_ _ ._ (_ _ ._ _|_ o ._ _ _ ._ _|_ /\\ ._ _. 
| _ o _\n | \\/\\/ | |_ |_ (/_ | __) (/_ | | |_ | | | | (/_ | | |_ /--\\ | | (_| | \\/ _> | _>\n /\"\"\"\n\n sub_title = \"Get sentiments from your tweets fast and easy!\"\n header = bcolors.HEADER + title + bcolors.ENDC + \"\\n\" + bcolors.WARNING + \"\\t\\t\" + sub_title + bcolors.ENDC + \"\\n\"\n return header", "def design_report_header(self):\n rstr = nl() + \" \" + nl() + t('table border-collapse= \"collapse\" border=\"1px solid black\" width=100%') + nl()\n rstr += t('tr') + nl()\n row = [0, '<object type= \"image/PNG\" data= \"cmpylogoSeatAngle.png\" height=60 ></object>',\n '<font face=\"Helvetica, Arial, Sans Serif\" size=\"3\">Created with</font>' \"&nbsp\" \"&nbsp\" \"&nbsp\" \"&nbsp\" \"&nbsp\" '<object type= \"image/PNG\" data= \"Osdag_header.png\" height=60 ''&nbsp\" \"&nbsp\" \"&nbsp\" \"&nbsp\"></object>']\n rstr += html_space(1) + t('td colspan=\"2\" align= \"center\"') + space(row[0]) + row[1] + t('/td') + nl()\n rstr += html_space(1) + t('td colspan=\"2\" align= \"center\"') + row[2] + t('/td') + nl()\n rstr += t('/tr') + nl()\n\n rstr += t('tr') + nl()\n rstr += design_summary_row(0, \"Company Name\", \"detail\", text_two=self.company_name, is_row=False)\n rstr += design_summary_row(0, \"Project Title\", \"detail\", text_two=self.project_title, is_row=False)\n rstr += t('/tr') + nl()\n\n rstr += t('tr') + nl()\n rstr += design_summary_row(0, \"Group/Team Name\", \"detail\", text_two=self.group_team_name, is_row=False)\n rstr += design_summary_row(0, \"Subtitle\", \"detail\", text_two=self.sub_title, is_row=False)\n rstr += t('/tr') + nl()\n\n rstr += t('tr') + nl()\n rstr += design_summary_row(0, \"Designer\", \"detail\", text_two=self.designer, is_row=False)\n rstr += design_summary_row(0, \"Job Number\", \"detail\", text_two=self.job_number, is_row=False)\n rstr += t('/tr') + nl()\n\n rstr += t('tr') + nl()\n rstr += design_summary_row(0, \"Date\", \"detail\", text_two=time.strftime(\"%d /%m /%Y\"), is_row=False)\n rstr += design_summary_row(0, \"Client\", \"detail\", text_two=self.client, is_row=False)\n rstr += t('/tr')\n rstr += t('/table') + nl() + \" \" + nl()\n\n rstr += t('hr')\n rstr += t('/hr') + nl() + \" \" + nl()\n return rstr", "def _write_header(self, head_msg=None):\n now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n header = \"\\n%s\\nDateTime: %s \\nMessage: %s \\n\" % (\"*\" * 100, now, head_msg)\n\n return header", "def build_header_1(self, header_len=b'\\x00\\x00\\x00\\x00', data_len=b'\\x00\\x00\\x00\\x00'):\n self.header_1 = b''\n header_1_dict = {'preamble': b'\\x50\\x4f',\n 'packet_type': b'\\x01\\x00\\x00\\x50',\n 'header_len': header_len + b'\\x02\\x00\\x00\\x00\\x00\\x00\\x00\\x00',\n 'data_len': data_len,\n 'agent_guid': b'{%s}' % self.agent_guid,\n 'agent_guid_padding': b'\\x00' * 90 + b'\\x01\\x00\\x00\\x00',\n 'agent_hostname': b'%s' % self.agent_hostname,\n 'hostname_padding': b'\\x00' * (32 - len(self.agent_hostname)) + b'\\x00' * 48}\n\n for item in header_1_dict:\n self.header_1 += header_1_dict[item]\n return self.header_1", "def header():\n record = cfg.get_current_site_record()\n header = \"{0} ({1})\".format(record['url'], record['id'])\n size = len(header) + 2 + 2\n return \"\"\"{sep}\n# {header} #\n{sep}\"\"\".format(sep='#'*size, header=header)", "def header(self, format=None):\n return [\" ID \",\n \"East\",\n \"North\",\n \"TARGET ELEV\",\n \" LENGTH\",\n \" AZ\",\n \" DIP\",\n \"PLAN ELEV\"]", "def header(self, as_list=False, separator='\\t'):\n if not self.attrs():\n return None\n if as_list:\n return self.attrs()\n 
else:\n return separator.join(self.attrs())", "def get_config_header(_config_global, _debug_log, _dpid, _hardware):\n return ''", "def _make_header(title: str, category: int, description: str, slug: str, image_file_name: Optional[str] = None) -> str:\n\n current_date = _get_current_time()\n category = _get_category(category)\n social_image = SOCIAL_IMAGE_TEMPLATE.format(image_file_name) if image_file_name else \"\"\n header = HEADER_TEMPLATE.format(title, current_date, slug, category, description, social_image)\n\n if social_image:\n figure_template = FIGURE_TEMPLATE.format(social_image)\n header += figure_template\n\n return header", "def build_markdown_header(title, date, author, categories, tags, slug,\r\n attachments=None):\r\n header = 'Title: %s\\n' % title\r\n if date:\r\n header += 'Date: %s\\n' % date\r\n if author:\r\n header += 'Author: %s\\n' % author\r\n if categories:\r\n header += 'Category: %s\\n' % ', '.join(categories)\r\n if tags:\r\n header += 'Tags: %s\\n' % ', '.join(tags)\r\n if slug:\r\n header += 'Slug: %s\\n' % slug\r\n if attachments:\r\n header += 'Attachments: %s\\n' % ', '.join(attachments)\r\n header += '\\n'\r\n return header", "def get_header(self, title):\n self.header = '<!DOCTYPE html>' \\\n '<html>' \\\n '<head>' \\\n '<title>Harm Brugge - ' + title + '</title>' \\\n '<link rel=\"icon\" href=\"../resources/img/dna.png\"/>' \\\n '<link href=\"../resources/css/bootstrap.min.css\" rel=\"stylesheet\">' \\\n '<link href=\"../resources/css/main.css\" rel=\"stylesheet\">' \\\n '<script type=\"text/javascript\" src=\"../resources/js/jquery.js\"></script>' \\\n '<script src=\"../resources/js/bootstrap.min.js\"></script>' \\\n '<script type=\"text/javascript\" src=\"../resources/js/bootbox.min.js\"></script>' \\\n '</head>' \\\n '<body>' \\\n '<div class=\"container shadow\">' \\\n '<div class=\"logo\">' \\\n '<h1></h1>' \\\n '</div>' \\\n '<br/>' \\\n '<div class=\"row content\">' \\\n '<div class=\"content-main\">' \\\n '<br/>' \\\n '<p class=\"lead content-title\">' + title + '</p>'\n return self.header", "def __getHeader(self):\n\n return (\"\\\\rtf%d\" # RTF version\n \"\\\\%s\" # Character set used in document\n \"\\\\deff%d\" # Index of default font\n \"\\\\deflang%d\\n\" # Default language\n % (self.rtfVersion, self.charset, self.defaultFont,\n self.defaultLang) +\n self.__getFontTable() +\n self.__getColorTable() +\n self.__getListTables() +\n self.__getGenerator())", "def _write_header(self):\n msg = self._write_executive_control_deck()\n msg += self._write_case_control_deck()\n return msg", "def headerString_(self):\n header = common.prog_name + ' (version ' + common.prog_version_str + \\\n ') running on ' + \\\n time.ctime(time.time())+'\\n\\n' + \\\n common.prog_name+'. 
Working options:\\n'\n #print self.job_type_name \n header = header +\\\n ' scheduler ' + self.scheduler_name + '\\n'+\\\n ' job type ' + self.job_type_name + '\\n'+\\\n ' working directory ' + common.work_space.topDir()\\\n + '\\n'\n return header", "def CSVHeader(self):\n \t\n return ','.join('\"{}\"'.format(Statistics.attrs[i][1]) \n for i in sorted(Statistics.attrs.keys()))", "def _generate_header_template() -> str:\n return LICENCE_HEADER_TEMPLATE.format(\n licence_identifier=configuration.get_value(ConfigurationVariable.FILE_LICENCE_IDENTIFIER),\n author=\"${owner}\",\n date=\"${years}\",\n )", "def gen_header():\n return (\n '<?xml version=\"1.0\" encoding=\"UTF-8\"?><!DOCTYPE html '\n + 'PUBLIC \"-//W3C//DTD XHTML 1.1//EN\" '\n + '\"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd\">'\n + '<html xmlns=\"http://www.w3.org/1999/xhtml\"> '\n + '<head><meta '\n + 'http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\"/> '\n + '</head> <body>')", "def asstring(self, short=False, header=True, summary=True,\n description=False):\n raise NotImplementedError", "def __str__(self):\r\n return unicode(self.header)", "def generate_header(name: str) -> str:\n return MARKDOWN_HEADER.format(name.capitalize(), date.today())", "def get_header():\n try:\n yml_iter = cfg.yml_config[\"header\"]\n except:\n # Probably no \"comments\" section in the yml-file.\n return \"\"\n\n return (\"\\n\".join(yml_iter) + \"\\n\\n\") if yml_iter is not None else \"\\n\"", "def get_string(self):\n this_column_specifier = \"l\" * self._num_cols\n this_column_headers = TABLE_COLSEP.join(\n [str(header_elt) for header_elt in self._header])\n this_table_header = TABLE_HEADER_TEMPLATE.substitute(\n column_specifier = this_column_specifier,\n caption = str(self._caption),\n tag = str(self._tag))\n if self._flip:\n this_table_content = (TABLE_ROWSEP_NOLINE + os.linesep).join(\n [TABLE_COLSEP.join(\n [self._header[row_num]] + [\n str(row_elt) for row_elt in self._rows[row_num]])\n for row_num in xrange(self._num_rows)])\n else:\n this_table_content = (TABLE_ROWSEP_NOLINE + os.linesep).join(\n [TABLE_COLSEP.join(self._header)] + [TABLE_COLSEP.join(\n [str(row_elt) for row_elt in row]) for row in self._rows])\n return os.linesep.join([this_table_header,\n ENDHEADER,\n this_table_content,\n TABLE_FOOTER])", "def to_header(self):\n\n return self._header_block", "def create_header(self, tables: List[Dict], schema: bool = False) -> str:\n header = \"\"\n if \"func\" in self.state:\n header += gt.sql_alchemy_func_import + \"\\n\"\n if self.postgresql_dialect_cols:\n header += (\n gt.postgresql_dialect_import.format(\n types=\",\".join(self.postgresql_dialect_cols)\n )\n + \"\\n\"\n )\n if self.constraint:\n header += gt.unique_cons_import + \"\\n\"\n if self.im_index:\n header += gt.index_import + \"\\n\"\n if schema and tables[0].table_schema:\n schema = tables[0].table_schema.replace('\"', \"\")\n header += \"\\n\" + gt.gino_init_schema.format(schema=schema)\n else:\n header += \"\\n\" + gt.gino_init\n return header", "def _printable(self):\n toPrint = \"CQC Header. 
Version: \" + str(self.version) + \" \"\n toPrint = toPrint + \"Type: \" + str(self.tp) + \" \"\n toPrint = toPrint + \"App ID: \" + str(self.app_id) + \" \"\n toPrint += \"Length: \" + str(self.length)\n return toPrint", "def _header(self, pam=False):\r\n if pam or self.magicnum == b'P7':\r\n header = \"\\n\".join((\r\n \"P7\",\r\n \"HEIGHT %i\" % self.height,\r\n \"WIDTH %i\" % self.width,\r\n \"DEPTH %i\" % self.depth,\r\n \"MAXVAL %i\" % self.maxval,\r\n \"\\n\".join(\"TUPLTYPE %s\" % unicode(i) for i in self.tupltypes),\r\n \"ENDHDR\\n\"))\r\n elif self.maxval == 1:\r\n header = \"P4 %i %i\\n\" % (self.width, self.height)\r\n elif self.depth == 1:\r\n header = \"P5 %i %i %i\\n\" % (self.width, self.height, self.maxval)\r\n else:\r\n header = \"P6 %i %i %i\\n\" % (self.width, self.height, self.maxval)\r\n if sys.version_info[0] > 2:\r\n header = bytes(header, 'ascii')\r\n return header", "def write_header(_metadata, rename_padding=False):\n template = \"\"\"\\\n VERSION {version}\n FIELDS {fields}\n SIZE {size}\n TYPE {type}\n COUNT {count}\n WIDTH {width}\n HEIGHT {height}\n VIEWPOINT {viewpoint}\n POINTS {points}\n DATA {data}\n \"\"\"\n str_metadata = _metadata.copy()\n\n if not rename_padding:\n str_metadata['fields'] = ' '.join(_metadata['fields'])\n else:\n new_fields = []\n for f in _metadata['fields']:\n if f == '_':\n new_fields.append('padding')\n else:\n new_fields.append(f)\n str_metadata['fields'] = ' '.join(new_fields)\n str_metadata['size'] = ' '.join(map(str, _metadata['size']))\n str_metadata['type'] = ' '.join(_metadata['type'])\n str_metadata['count'] = ' '.join(map(str, _metadata['count']))\n str_metadata['width'] = str(_metadata['width'])\n str_metadata['height'] = str(_metadata['height'])\n str_metadata['viewpoint'] = ' '.join(map(str, _metadata['viewpoint']))\n str_metadata['points'] = str(_metadata['points'])\n tmpl = template.format(**str_metadata)\n return tmpl", "def as_text(self):\r\n header_text = \"|--- SCHC Fragment Header {}---|\\n\"\r\n return self.base_as_text(header_text)", "def header_string(headers_dict):\r\n header_list = []\r\n\r\n if 'Content-Type' in headers_dict:\r\n header_list.append(headers_dict['Content-Type'] + \"\\n\")\r\n if 'Date' in headers_dict:\r\n header_list.append(headers_dict['Date'] + \"\\n\")\r\n if 'Content-MD5' in headers_dict:\r\n header_list.append(headers_dict['Content-MD5'] + \"\\n\")\r\n\r\n return \"\".join(header_list) # Note that trailing \\n's are important\r", "def _get_seq_header_string(sequence_len: int) -> str:\n return \"SQ SEQUENCE {} AA; {} MW; {} CRC64;\".format(\n sequence_len,\n 12345, # Does not need to be set for ProtGraph\n \"45D66B0D27B69FCD\" # Does not need to be set for ProtGraph\n )", "def getTableHeader(self, filename):\n hdr = \"\"\n with open(filename, \"r\") as f:\n for line in f:\n if line[0] == \">\":\n hdr += line\n else:\n return hdr", "def get_header(data):\n header = \"\"\n for item in data:\n if len(item) > 10:\n header = item\n break\n return header", "def generate_header(value_type, num_elements, element_multiplier, imag, name_length, name):\n result = []\n\n result += Ensemble.int32_to_bytes(value_type) # Value Type\n result += Ensemble.int32_to_bytes(num_elements) # Number of elements\n result += Ensemble.int32_to_bytes(element_multiplier) # Element Multiplier\n result += Ensemble.int32_to_bytes(imag) # Image\n result += Ensemble.int32_to_bytes(name_length) # Name Length\n result += name.encode() # Name\n\n return result", "def get_export_header(self):\n\n name = self.get_name()\n\n 
if (self.name == \"input::nodes\"):\n\n name = \"user-specified\"\n\n grp_string = self.get_grp_string()\n\n if grp_string != \"\":\n\n grp_string = \" \" + grp_string\n\n return \"\\n!*!Label \" + self.path[1] + \" ..\" + grp_string + \" .. \" + name + \"\\n\"", "def __make_header__(self):\n header = lashead.Header(point_format=0)\n return header", "def __str__(self):\n final_str = [\"Binary File Header:\"]\n for item in BINARY_FILE_HEADER_FORMAT:\n final_str.append(\"\\t%s: %s\" % (item[1],\n str(getattr(self, item[1]))))\n return \"\\n\".join(final_str)", "def generate_bcd_header():\n outstr = \"change_score,change_significant\"\n return outstr", "def prefix(self):\n if len(self.desc) > 0:\n return self.desc + \" \"\n\n return \"\"", "def empty_header_data(cls):\n empty_struct = struct.Struct(cls.HEADER_STRUCT_FORMAT_STR)\n packed_data = empty_struct.pack(0, 0, 0, 0)\n return packed_data", "def _printable(self) -> str:\n return \"CQC Type header. Type=\" + str(self.type) + \" | Length=\" + str(self.length)", "def get_header_string(aeff_str, e0, spe, j2_range, a_values):\n header_lines = list()\n header_lines.append(\n '! Effective SM interaction generated by OLS and VCE with Aeff = ' +\n '%s' % str(aeff_str))\n header_lines.append('! Zero body term: %10.6f' % (e0,))\n header_lines.append('! Index n l j tz')\n for j, idx in zip(j2_range, range(len(spe))):\n header_lines.append('! %d %d %d %d %d' % (idx+1, 0, 1, j, 1))\n header_lines.append('! ')\n spe_line = '-999 ' + ' '.join(['%10.6f' % e for e in spe])\n spe_line += ' %d %d 0.000000' % (a_values[0], a_values[2])\n header_lines.append(spe_line)\n return '\\n'.join(header_lines)", "def get_string(self):\n this_column_specifier = (\n TABLE_NUMROWS_SEP + \"l\" + TABLE_NUMROWS_SEP + TABLE_NUMROWS_SEP + \n TABLE_NUMROWS_SEP.join([\"c\" for col in xrange(self._num_cols)]) +\n TABLE_NUMROWS_SEP)\n this_column_headers = TABLE_COLSEP.join(\n [\"\"] + [str(top_header_elt) for top_header_elt in self._top_header])\n this_chart_header = CHART_HEADER_TEMPLATE.substitute(\n column_specifier = this_column_specifier,\n caption = self._caption,\n tag = self._tag,\n column_headers = this_column_headers)\n this_chart_content = (TABLE_ROWSEP + os.linesep).join(\n [TABLE_COLSEP.join([str(left_elt)] +\n [str(self._cells[top_elt][left_elt])\n for top_elt in self._top_header])\n for left_elt in self._left_header])\n return os.linesep.join([this_chart_header, this_chart_content,\n CHART_FOOTER])", "def csv_make_header(self, fileobj, title, comment=\"\"):\n fileobj.write(csv_line( [\"#Title:\", title] ) )\n fileobj.write(csv_line( [\"#Comment:\", comment] ) )\n #Any other useful comment s trings?\n fileobj.write('#\"First column is the sample phi motor rotation, in radians\"\\n' )\n fileobj.write('#\"Next 6 columns are the XY leg positions in mm, relative to the central (neutral) position.\"\\n' )\n fileobj.write('#\"Next are 2 columns for the stopping criterion parameters.\"\\n' )\n #Line of header info\n fileobj.write(csv_line( ['Phi', 'LegA_X', 'LegA_Y', 'LegB_X', 'LegB_Y', 'LegC_X', 'LegC_Y', 'CountFor', 'CountValue', 'Comment'] ) )", "def csv_header(self, hostname):\n result = \"\"\n for interface in self.interfaces:\n result += hostname + \"-pkts-in[\" + interface + \"],\"\n result += hostname + \"-pkts-out[\" + interface + \"],\"\n result += hostname + \"-bytes-in[\" + interface + \"],\"\n result += hostname + \"-bytes-out[\" + interface + \"],\"\n return result", "def __get_header_tags(self):\n tag = \"<th>{}</th>\"\n\n return (tag * 
len(self.__rows)).format(*self.__rows)", "def generate_header(gene, variant):\n return '>{}_{}'.format(gene, variant)", "def build_header(self, app_name, host_name, message_id, priority,\n process_id, version, timestamp, sd):\n head = SyslogMessageHead()\n head.appname = app_name or '-'\n head.hostname = host_name or '-'\n head.messageid = message_id or '-'\n head.priority = priority or '-'\n head.processid = process_id or '-'\n head.timestamp = timestamp or '-'\n head.version = version or '-'\n head.sd = sd or {}\n return head", "def generate_hour_header():\n cf = config.Config()\n outstr = \"total_rotation,total_acceleration,total_distance,number_missing,\"\n outstr += \"oc1_time,oc2_time,oc3_time,oc4_time,oc5_time,oc6_time,oc7_time,\"\n outstr += \"oc8_time,oc9_time,oc10_time,oc11_time,oc12_time,oc13_time,\"\n outstr += \"oc14_time,oc15_time,oc16_time,oc17_time,oc18_time,oc19_time,\"\n outstr += \"oc20_time,oc21_time,oc22_time,oc23_time,oc24_time,oc25_time,\"\n outstr += \"oc26_time,oc27_time,oc28_time,oc29_time,oc30_time,oc31_time,\"\n outstr += \"oc32_time,oc33_time,\"\n anames = cf.activity_list\n for i in range(len(anames)):\n outstr += anames[i] + \"_time,\"\n outstr += \"attraction_time,house_time,\"\n outstr += \"restaurant_time,road_time,service_time,store_time,work_time,\"\n outstr += \"other_time\"\n return outstr", "def get_header():\n str_list = ['specimennumber','speciesid','group','family','genus','species','scientificname', \\\n 'commonname','country','state','county','locality','latitude','longitude', \\\n 'source','accuracy','drainagename','centroidtype','huc8name','huc8', \\\n 'huc10name','huc10','huc12name','huc12','date','year','month','day','status','comments', \\\n 'recordtype','disposal','museumcatnumber','freshmarineintro','references']\n return str_list", "def _format_header(self):\n return self._format_dict(self.header)", "def header(self, artifacts, options=None):\n if options is None:\n options = {}\n\n if not artifacts or False == isinstance(artifacts, dict) or \\\n False == isinstance(options, dict):\n return ''\n\n h_artifacts = copy.copy(artifacts)\n del h_artifacts['mac']\n\n h_artifacts['hash'] = options.get('hash', None)\n\n if 'ext' in options:\n h_artifacts['ext'] = options['ext']\n\n credentials = self.credentials_fn(h_artifacts['id'])\n if not credentials or 'key' not in credentials or \\\n 'algorithm' not in credentials:\n return ''\n\n if 'hash' not in h_artifacts or h_artifacts['hash'] is None or \\\n len(h_artifacts['hash']) == 0:\n if 'payload' in options:\n h_artifacts['hash'] = hcrypto.calculate_payload_hash(\n options['payload'], credentials['algorithm'],\n options['contentType'])\n\n mac = hcrypto.calculate_mac('response', credentials, h_artifacts)\n\n header = 'Hawk mac=\"' + mac + '\"'\n if 'hash' in h_artifacts:\n header += ', hash=\"' + h_artifacts['hash'] + '\"'\n\n if 'ext' in h_artifacts and h_artifacts['ext'] is not None and \\\n len(h_artifacts['ext']) > 0:\n\n h_ext = util.check_header_attribute(\n h_artifacts['ext']).replace('\\\\', '\\\\\\\\').replace('\\n', '\\\\n')\n\n header += ', ext=\"' + h_ext + '\"'\n\n return header", "def get_csv_header(verbose=False):\n if verbose:\n return \"Time,Raw Time,Name,ID,Value\\n\"\n else:\n return \"Time,Name,Value\\n\"", "def __str__(self):\n\n table_list = [self.headers]\n\n for row in self.data:\n table_list.append([row[col] or \"\" for col in self.headers])\n\n return create_table_string(table_list)", "def get_header(self, root):\n header = etree.SubElement(root, \"FileHeader\")\n 
header.set(\"revMajor\", \"1\")\n header.set(\"revMinor\", \"0\")\n header.set(\"date\", datetime.today().strftime(\"%Y-%m-%dT%H:%M:%S\"))\n header.set(\"description\", \"Generated OpenSCENARIO File\")\n header.set(\"author\", \"QGIS OSCGenerator Plugin\")", "def _printable(self):\n toPrint = \"Communication header. \"\n toPrint += \"Remote App ID: \" + str(self.remote_app_id) + \" \"\n toPrint += \"Remote Node: \" + str(self.remote_node) + \" \"\n toPrint += \"Remote Port: \" + str(self.remote_port) + \" \"\n\n return toPrint", "def header_huffington(self):\n head = '\\n ^^Polls ^^fetched ^^from ^^[http://elections.huffingtonpost.com/](http://elections.huffingtonpost.com/).\\n\\n'\n head += '***{}***\\n\\n'.format(self.get_greeting())\n head += '.\\n\\n'\n head += '.\\n\\n'\n return head", "def dataOrHeader(self, name, doH):\r\n f = open(self.location + \"/\" + name)\r\n r = f.read()\r\n f.close()\r\n index = r.find(self.dividerString_)\r\n dataOrHeader = r[index+1:len(r)] if doH else r[0:index]\r\n #hacky fix for random \\r\r\n dataOrHeader = dataOrHeader.replace(\"\\r\", \"\") \r\n return dataOrHeader", "def create_header(stack):\n prehead = map(lambda x: ''.join([\n ''.join(['+']*(x//2**8)),\n '>+>',\n ''.join(['+']*(x%2**8))]), \n stack)\n head = ''.join(['+>',\n '>+>'.join(prehead),\n '<<<'])\n return head", "def csv_make_header(self, fileobj, title, comment=\"\"):\n #Line of header info\n \n fileobj.write(csv_line( ['Notes'] + [x.name for x in self.angles] + ['Wait For/n', 'Value'] ) )", "def csv_make_header(self, fileobj, title, comment=\"\"):\n #Line of header info\n fileobj.write(csv_line( ['Comment'] + [x.name.lower() for x in self.angles] + ['Wait For', 'Value'] ) )", "def write_header(self):\n lines = [\"\"]\n\n for key in self._header_keys:\n value = self.get_attr_from_name(key)\n if isinstance(value, list):\n value = \",\".join([f\"{v:.1f}\" for v in value])\n elif isinstance(value, (float)):\n value = f\"{value:.7f}\"\n elif isinstance(value, (int)):\n value = f\"{value:.0f}\"\n\n key = (\n key.replace(\"_\", \" \")\n .title()\n .replace(\" \", \"\")\n .replace(\"MTEdit.\", \"MTEdit:\")\n )\n\n lines.append(f\"${key}={value.capitalize()}\")\n\n return lines", "def make_header(text, size=80, symbol=\"-\"):\n header = symbol * size + \"\\n\"\n header += \"%s\\n\" % text\n header += symbol * size + \"\\n\"\n return header", "def make_header(mode):\n\n return (\"{}\\n\".format('\\t'.join(\n ['#chrom', 'coord', 'total', 'dtotal'] +\n DIVNAMES[mode] +\n ['{}_{}'.format(x, y)\n for x in STATNAMES[mode]\n for y in ('left', 'right', 'total',\n 'stat', 'chisq', 'Pvalue')] +\n ['introgression'] +\n ['introg{}'.format(x) for x in INTROGPATTERNS[mode]]))\n ).encode('utf-8')", "def get_header(self, metrics=None, prepend=None,\n append=None, skip=None, split=False, with_sep=True):\n if metrics is None:\n metrics = self.data\n\n header = io.make_header_from_dict(metrics, skip=skip, split=split,\n prepend=prepend, append=append,\n with_sep=with_sep)\n\n if self.data_strs[0] != header:\n self.data_strs.insert(0, header)\n\n return header", "def __str__(self):\n if self.filename:\n filename = self.filename\n else:\n filename = 'Unknown'\n if self.endian == '<':\n endian = 'Little Endian'\n else:\n endian = 'Big Endian'\n ret_val = ('FILE: %s\\nRecord Offset: %i byte\\n' +\n 'Header Endianness: %s\\n\\n') % \\\n (filename, self.record_offset, endian)\n ret_val += 'FIXED SECTION OF DATA HEADER\\n'\n for key in self.fixed_header.keys():\n ret_val += '\\t%s: %s\\n' % (key, 
self.fixed_header[key])\n ret_val += '\\nBLOCKETTES\\n'\n for key in self.blockettes.keys():\n ret_val += '\\t%i:' % key\n if not len(self.blockettes[key]):\n ret_val += '\\tNOT YET IMPLEMENTED\\n'\n for _i, blkt_key in enumerate(self.blockettes[key].keys()):\n if _i == 0:\n tabs = '\\t'\n else:\n tabs = '\\t\\t'\n ret_val += '%s%s: %s\\n' % (tabs, blkt_key,\n self.blockettes[key][blkt_key])\n ret_val += '\\nCALCULATED VALUES\\n'\n ret_val += '\\tCorrected Starttime: %s\\n' % self.corrected_starttime\n return ret_val", "def getHeader(self):\n return self.data.header", "def _printable(self):\n toPrint = \"Sequence header. \"\n toPrint += \"Command length: \" + str(self.cmd_length) + \" \"\n\n return toPrint", "def generate_header(value, params):\n parts = [quote(value)]\n for key in params:\n parts.append('%s=\"%s\"' % (key, quote(params[key])))\n return '; '.join(parts)", "def _make_event_header( self, size_of_the_rest, timestamp, duration, keycode ) : \r\n\r\n # 'D' and the size of the message are not counted \r\n sizeof_int32 = 4 \r\n addendum = 3 * sizeof_int32\r\n \r\n total_length = addendum + size_of_the_rest\r\n\r\n ## return struct.pack( \"=sH2L4s\", 'D', total_length, timestamp, duration, keycode ) \r\n result_str = struct.pack( \"=sH2L4s\", 'D', total_length, timestamp, duration, keycode ) \r\n\r\n # # debug \r\n # print 'header: \"%s\" ' % (result_str, )\r\n\r\n return result_str", "def get_license_string(self):\n output = ''\n if self.license_id:\n output += '{}'.format(self.license_id)\n if self.license_creation_date:\n output += ' (Created {})'.format(self.license_creation_date)\n if self.license_type:\n output += ' {}'.format(self.license_type)\n if self.license_status:\n output += ' - {}'.format(self.license_status)\n return output", "def __str__(self):\r\n # this will hold the output lines while we are generating them\r\n output_lines = []\r\n\r\n # Build an ordered list of headers\r\n # 2. 
The optional columns in the mapping file\r\n headers_present = self._metadata.iteritems().next()[1].keys()\r\n optional_headers = list(set(headers_present) -\r\n set(self.req_header_prefix +\r\n self.req_header_suffix))\r\n\r\n headers = (self.req_header_prefix + optional_headers +\r\n self.req_header_suffix)\r\n\r\n output_lines.extend(self.Comments)\r\n output_lines.append('#' + '\\t'.join(headers))\r\n\r\n for sample_id, data in self._metadata.iteritems():\r\n current_data = []\r\n\r\n # Get the first required columns\r\n current_data.append(sample_id)\r\n # skip the SampleID required header, since we get that from the\r\n # dict we are currently iterating over\r\n for header in self.req_header_prefix[1:]:\r\n current_data.append(data[header])\r\n\r\n # Get the optional columns; allow for None in these columns\r\n for header in optional_headers:\r\n value = self.no_data_value if data[header] is None else \\\r\n data[header]\r\n\r\n current_data.append(value)\r\n\r\n # get the last required columns\r\n for header in self.req_header_suffix:\r\n current_data.append(data[header])\r\n\r\n output_lines.append('\\t'.join([str(x) for x in current_data]))\r\n\r\n return '\\n'.join(output_lines) + '\\n'", "def format_report_header(self):", "def header(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"header\")", "def _msg_hdr(self, msg):\n hdr = self.help_str + msg\n self.header_text.set_text(hdr)", "def create_message_header(self):\n header = MessageHeader(sender=self.common_name, recipient=self.aggregator_uuid, federation_id=self.federation_uuid, counter=self.counter, single_col_cert_common_name=self.single_col_cert_common_name)\n return header", "def describe(self):\n return ''", "def _printable(self):\n toPrint = \"Factory Header. \"\n toPrint += \"Number of iterations: \" + str(self.num_iter) + \" \"\n return toPrint", "def __repr__(self):\n out = \"\"\n for section_name, section_data in sorted(self.header.items()):\n if section_name== 'Error':\n continue\n out += '\\n'.join(['='*80, \" \"*20 + section_name, '='*80]) + '\\n'\n for key, val in sorted(section_data.items()):\n out += ' - {0} : {1}\\n'.format(key, val)\n out += '\\n'\n return out", "def get_string(self):\n if not self._flip:\n this_column_specifier = TABLE_NUMROWS_SEP + TABLE_NUMROWS_SEP.join(\n [\"l\" for col in xrange(self._num_cols)]) + TABLE_NUMROWS_SEP\n this_table_header = TABLE_HEADER_TEMPLATE.substitute(\n column_specifier = this_column_specifier,\n caption = str(self._caption),\n tag = str(self._tag))\n this_table_column_headers = COLUMN_HEADERS_TEMPLATE.substitute(\n column_headers = TABLE_COLSEP.join(\n [str(header_elt) for header_elt in self._header]))\n this_table_content = (TABLE_ROWSEP + os.linesep).join(\n [TABLE_COLSEP.join([str(row_elt) for row_elt in row])\n for row in self._rows])\n return os.linesep.join([this_table_header,\n this_table_column_headers,\n ENDHEADER,\n this_table_content,\n TABLE_FOOTER])\n else:\n this_column_specifier = (\n TABLE_NUMROWS_SEP + \"l\" + TABLE_NUMROWS_SEP +\n TABLE_NUMROWS_SEP + TABLE_NUMROWS_SEP.join(\n [\"l\" for col in xrange(self._num_cols - 1)]) +\n TABLE_NUMROWS_SEP)\n this_table_header = TABLE_HEADER_TEMPLATE.substitute(\n column_specifier = this_column_specifier,\n caption = str(self._caption),\n tag = str(self._tag))\n this_table_content = (TABLE_ROWSEP + os.linesep).join(\n [TABLE_COLSEP.join([header_elt] + [str(elt) for elt in row])\n for (header_elt, row) in zip(self._header, self._rows)])\n return os.linesep.join([this_table_header,\n ENDHEADER,\n 
this_table_content,\n TABLE_FOOTER])", "def title(self):\n return self.header", "def generate_headers(self):\n raise NotImplementedError()" ]
[ "0.73597187", "0.72831184", "0.71944606", "0.7154044", "0.6919942", "0.6901166", "0.6901166", "0.675267", "0.66368526", "0.66322434", "0.66226566", "0.65845186", "0.658279", "0.658279", "0.6578081", "0.6538257", "0.6538257", "0.6524499", "0.65011746", "0.641547", "0.63942367", "0.6302408", "0.628779", "0.6277615", "0.6227234", "0.62013006", "0.62003803", "0.6200267", "0.6196637", "0.61892676", "0.61879355", "0.618689", "0.6184799", "0.6180728", "0.6160292", "0.61579067", "0.61554474", "0.61479867", "0.61445105", "0.61439246", "0.6127768", "0.6126906", "0.6122519", "0.61033434", "0.6084674", "0.60799754", "0.6069299", "0.6055879", "0.604885", "0.6043695", "0.6037077", "0.6028584", "0.60270786", "0.6017853", "0.6014223", "0.60027206", "0.5990209", "0.5966119", "0.5959103", "0.5958899", "0.5952309", "0.5951395", "0.5951013", "0.59481704", "0.59473044", "0.59430224", "0.5941032", "0.5937637", "0.59297156", "0.5925325", "0.59237164", "0.59201235", "0.5913941", "0.5913586", "0.5892388", "0.5889484", "0.5885898", "0.58812845", "0.5877281", "0.5873732", "0.5872273", "0.58704937", "0.5865199", "0.58577746", "0.58522207", "0.581786", "0.58080083", "0.58057743", "0.57990617", "0.57940304", "0.57933456", "0.57886803", "0.57827336", "0.5781407", "0.5779544", "0.5777935", "0.5749824", "0.57486033", "0.57408786", "0.57390904" ]
0.7559777
0
Reads CSV data on initialization.
def initialize(self, ctx): super().initialize(ctx) self._csv_reader = CsvReader() self._csv_reader.data = ctx.interpolate(self.data) self._csv_reader.strip = True ctx.comp.initialize(self._csv_reader) for m in self._csv_reader.process(ctx, None): self.insert(ctx, m)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.append(row)", "def read_csv():", "def read_csv_file(self):\n pass", "def _read_csv(self):\n with open(self._file_path, 'rb') as f:\n reader = csv.DictReader(f, delimiter=',')\n self._content = [row for row in reader]", "def handle_csv(self):\n try:\n reader = csv.reader(open(self.options.datafile, 'r'))\n except IOError:\n errormsg(_('Cannot read \"{}\"'.format(self.options.datafile)))\n raise Exception(_('Cannot read \"{}\"'.format(self.options.datafile)))\n if self.options.var_type == 'name':\n try:\n self.header = reader.next()\n except StopIteration:\n errormsg(_('Data file \"{}\" contains no data'.format(\n self.options.datafile)))\n raise Exception(_('Data file \"{}\" contains no data'.format(\n self.options.datafile)))\n self.data = []\n for row in reader:\n self.data.append(row)", "def read(self):\r\n\r\n self.data = []\r\n\r\n with open(self.filename + \".csv\", mode='r') as csv_file:\r\n reader = csv.DictReader(csv_file)\r\n for row in reader:\r\n self.data.append(row)", "def read_csv(self, csv_input):\n # https://stackoverflow.com/a/45063514\n dtypes = {\n 'lat': 'U',\n 'long': 'U'\n }\n csv_data = pd.read_csv(csv_input, encoding='UTF-8', sep=',', na_values=[''], dtype=dtypes)\n\n self.table = csv_data.fillna('').applymap(lambda x: x.strip() if type(x) == str else x)\n self.log.info('Data read from CSV %s' % csv_input)\n #print('Data read from CSV %s' % csv_input)", "def get_data(self, csv_file):\n pass", "def _loadCSVFile(self):\n self._df = pd.read_csv(\n self._pathfile, sep=CSV_SEPARATOR, index_col=CSV_INDEX_COL)", "def read_csv(self) -> None:\n\n self._df = pd.read_csv(self._dataset_file)", "def loadCSV(input_file):", "def post_init(cr, registry):\n import_csv_data(cr, registry)", "def __init__(self, *, csv_file_path: str = ''):\n self.__csv_file_path = csv_file_path\n self._parse_csv()", "def load_csv(self):\n self.database = pd.read_csv(\n self.settings['database_path'],\n encoding='utf-8')", "def read(self):\n with open(self.filename) as f:\n reader=csv.reader(f)\n for row in reader:\n self.data.appendleft(row)", "def load_data(self):\n self.data = pd.read_csv(self.data_path, dtype=self.dtype)\n self.data.columns = self.data_cols\n self.data.topic = self.data.topic.str.lower()\n logging.debug(f'Data Load Complete: {self.data_path}')", "def __init__(self, csvfile, *args, **kwargs):\n self.encoding = kwargs.pop('encoding', 'utf-8')\n csv.DictReader.__init__(self, csvfile, *args, **kwargs)", "def place_types_read_csv(self, csv_input):\n csv_data = pd.read_csv(csv_input, encoding='UTF-8', sep=',', na_values=[''])\n self.table = csv_data.fillna('').applymap(lambda x: x.strip() if type(x) == str else x)\n self.log.info('Data read from CSV %s' % csv_input)", "def __obtain_data_from_csv__(self, csvfile):\n data = csvfile.readlines()\n data = self.__parse_string_for_delimiter__(data)\n return data", "def _read_csvs(self):\n self.data = pd.read_csv(self.path+self.name, index_col=0)", "def read_csv():\n global csvdata\n global CONFIG\n if type(csvdata) == type(None):\n if not os.path.exists(CONFIG[\"csvfile\"]):\n csvdata = pandas.read_csv(CONFIG[\"csvrepo\"],\n na_values=[\"-999999\",\"NOT AVAILABLE\"])\n os.makedirs(CONFIG[\"cachedir\"],exist_ok=True)\n csvdata.to_csv(CONFIG[\"csvfile\"])\n else:\n csvdata = pandas.read_csv(CONFIG[\"csvfile\"])\n return csvdata", 
"def __init__(self, csv_file: str = None) -> None:\n super().__init__(csv_file)", "def __init__(self, csv_file: str = None) -> None:\n super().__init__(csv_file)", "def read_data(self):\n print 'Reading Data ...'\n fname = self.wpath + 'Data/' + self.city[2] + '-' + self.application + '.csv.bz2'\n self.dataset = loadtxt(fname, skiprows=1,\n dtype=[('lat', 'f8'), ('lng', 'f8'), ('time', 'i4'), ('user', 'S20')],\n usecols=(0, 1, 2, 3), delimiter=';', comments='#')", "def __init__(self, path):\n self.csv_path = path\n # check if csv format is valid or not\n self.check_valid_csvformat(self.csv_path)\n \"\"\" empty dict to store all company names\n prepare initial company data in dictionary format \"\"\"\n self.company_data = dict()", "def load_from_csv(self):\n\n self._logger.info('Reading data coming from CSV files')\n\n sta = self.stations\n\n if sta != None:\n msta = \", \".join(sta)\n self._logger.debug('Using only stations {0}'.format(msta))\n\n # load the data\n v = list(self.variables)\n v.append('metadata')\n for i in v:\n if i in self.dataConfig:\n\n self._logger.debug('Reading %s...' % self.dataConfig[i])\n if i == 'metadata':\n dp_final = pd.read_csv(self.dataConfig[i],\n index_col='primary_id')\n #Ensure all stations are all caps.\n dp_final.index = [s.upper() for s in dp_final.index]\n\n elif self.dataConfig[i]:\n dp_full = pd.read_csv(self.dataConfig[i],\n index_col='date_time',\n parse_dates=[0])\n dp_full.columns = [s.upper() for s in dp_full.columns]\n\n if sta is not None:\n\n data_sta = dp_full.columns.str.upper()\n\n # Grab IDs from user list thats also in Data\n self.stations = [s for s in data_sta if s in sta]\n dp = dp_full[dp_full.columns[(data_sta).isin(sta)]]\n\n else:\n dp = dp_full\n\n # Only get the desired dates\n dp_final = dp[self.start_date:self.end_date]\n\n if dp_final.empty:\n raise Exception(\"No CSV data found for {0}\"\n \"\".format(i))\n\n setattr(self, i, dp_final)", "def dataLoad():\n try:\n try: #Python3\n f = open(__file__ + \".csv\",\"rt\")\n except: #Python2\n f = open(__file__ + \".csv\",\"rb\")\n data = f.read().split(',')\n entryCol.entry0.delete(0,END)\n entryCol.entry0.insert(0,data[0])\n entryCol.entry1.delete(0,END)\n entryCol.entry1.insert(0,data[1])\n entryCol.entry2.delete(0,END)\n entryCol.entry2.insert(0,data[2])\n entryCol.entry3.delete(0,END)\n entryCol.entry3.insert(0,data[3])\n botWind.writeN(\"DataLoad: File\")\n except:\n botWind.writeN(\"DataLoad: Default\")", "def __init__(self, csvfile):\n self._reader = csv.DictReader(\n csvfile,\n delimiter=self.CSVCONFIG.delimiter,\n doublequote=self.CSVCONFIG.doublequote,\n escapechar=self.CSVCONFIG.escapechar,\n lineterminator=self.CSVCONFIG.lineterminator,\n quotechar=self.CSVCONFIG.quotechar,\n quoting=self.CSVCONFIG.quoting,\n skipinitialspace=self.CSVCONFIG.skipinitialspace,\n )\n self.badRows = []", "def load_csv(csvpath):\n with open(csvpath, \"r\") as csvfile:\n data = []\n csvreader = csv.reader(csvfile, delimiter=\",\")\n\n # Skip the CSV Header\n next(csvreader)\n\n # Read the CSV data\n for row in csvreader:\n data.append(row)\n return data", "def __init__(self, csv_path):\n # Checking files\n fdops.check_if_file_exists(csv_path)\n\n # loading proposal data as a data frame\n self._df = pd.read_csv(csv_path)\n\n # Dictionary containing proposal properties\n self.props = self._get_properties(csv_path)", "def read_from_csv(self, csv_file):\n data = []\n with codecs.open(csv_file, 'r', encoding='utf-8') as csvfile:\n header = None\n for i, line in enumerate(csvfile):\n line_split = 
[x.strip() for x in line.split(\"|\")]\n line_data = [x for x in line_split if len(x) > 0]\n if i == 0:\n header = line_data\n else:\n entry = {}\n for i,datum in enumerate(line_data):\n entry[header[i]] = datum\n data.append(entry)\n print \"Loaded %d entries from %s\" % (len(data), csv_file)\n return data", "def read_csv(self, csv_file):\n mylog.debug('Reading csv file %s for data' % csv_file)\n csv_data = pandas.read_csv(csv_file)\n mylog.debug('Read of csv file complete.')\n #mylog.debug('%s' % csv_data)\n #sometimes the csv has an empty dataframe #\n if csv_data.empty:\n mylog.debug('Data frame is empty; repopuating data')\n csv_info = []\n for item in csv_data:\n #add the data one cell at a time to the list #\n #for some reason, some csvs have the data #\n #with random decimal points #\n csv_info.append(item.split(\".\")[0])\n df = pandas.DataFrame(columns=csv_info)\n df.loc[0]=csv_info\n #write the data from the list back into the cells#\n #one at a time #\n for column in range(0, len(csv_info)): \n df.iloc[0,column] = csv_info[column]\n csv_data = df \n return csv_data", "def readRecordFromFile():\n\twith open(gbl.sourceFile, newline='') as csvfile:\n\t\trowReader = csv.reader(csvfile, delimiter=gbl.csvDiscriminator, quotechar=gbl.csvQuotechar)\n\t\tfor row in rowReader:\n\t\t\tROWData.append(row)", "def _load(self):\n op_type_file_path = os.path.join(\n self._profiling_dir,\n self._csv_file_to_analyse.format(self._device_id)\n )\n op_type_file_path = validate_and_normalize_path(\n op_type_file_path, raise_key=\"Invalid op_type_file_path\")\n if not os.path.isfile(op_type_file_path):\n log.warning('The file <%s> does not exist.', op_type_file_path)\n return\n\n with open(op_type_file_path, 'r') as file:\n csv_reader = csv.reader(file)\n _ = next(csv_reader)\n for info in csv_reader:\n self._data.append(self._convert_field_type(info))", "def read_file(self):\n # This is quite ugly but works for now.\n self.header = read_csv(self.file_name, delim_whitespace=True,\n header=TrackData.header_line,\n nrows=1).to_dict(orient='index')[0]\n self.data = read_csv(self.file_name, delim_whitespace=True, \n header=TrackData.data_line)", "def csv_data_loader(path):\n\n logging.info(\"Loading file using SparkSession\")\n csvload = Spark.instance.spark() \\\n .read \\\n .format(\"csv\") \\\n .options(header=True) \\\n .options(mode=\"DROPMALFORMED\")\n\n return csvload.option(\"inferSchema\", \"true\").load(path)", "def read(self):\n \n self.df = pd.read_csv(self.path, encoding = \"ISO-8859-1\")", "def init_cache_from_csv(self, path: str) -> None:\n log.debug(f\"Initalising {self.name} from csv at {path}\")\n df = io.csv_to_df(path=path)\n df = df.set_index(self.ids).sort_index(axis=0).sort_index(axis=1)\n io.df_to_parquet(df=df, path=self.path)\n log.debug(f\"{self.name} now cached in local parquet.\")", "def readcsv(path, delimiter= ','):\n my_data = genfromtxt(path, delimiter= delimiter)\n return my_data", "def read_csv(path):\r\n data = []\r\n csv_file = open(path)\r\n for row in csv.DictReader(csv_file):\r\n data.append(row)\r\n csv_file.close() \r\n return data", "def read_data_from_csv(csv_file, header=None, **kwargs):\n if os.path.isabs(csv_file) == False:\n path_to_csv = os.path.join(csv_file)\n else:\n path_to_csv = csv_file\n row_list = []\n if \"field_sep\" not in kwargs.keys():\n field_sep = ','\n else:\n field_sep = kwargs.get(\"field_sep\")\n with open(path_to_csv, mode='r') as csv_file:\n csv_reader = csv.DictReader(csv_file, delimiter=field_sep, fieldnames=header)\n for record in 
csv_reader:\n if list(record.values())[0].startswith(\"#\") is not True:\n # IT'S A COMMENT IF IT STARTS WITH \"#\" \n # IF THIS IS YOUR HEADER ROW, SUPPLY A LIST OF COLUMN NAMES WHEN CALLING THE FUNCTION\n row_list.append(record)\n return row_list", "def read_csv_data(csv_path):\n\n return pd.read_csv(csv_path, sep=',', engine='python')", "def _read_csv(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n return list(csv.reader(f, delimiter=\",\", quotechar=quotechar))", "def _read_csv(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n return list(csv.reader(f, delimiter=\",\", quotechar=quotechar))", "def load(self, path):\n self.df = pd.read_csv(path)\n print(\"Loaded data from {}\".format(path))", "def data_from_csv(self, filepath):\n self.dataframe = pd.load_csv(filepath, separator='')", "def _load_csv_data(kingdom_csv_path: str):\n\n file_path = os.getcwd() + \"/\" + RESOURCES_DIR_PATH + \"/\" + kingdom_csv_path\n\n kingdomArr = []\n\n with open(file_path, newline=\"\") as csv_file:\n reader = csv.reader(csv_file, delimiter=\",\")\n for row in reader:\n kingdomArr.append(Kingdom(row[0], row[1]))\n\n return kingdomArr", "def load_csv(self):\n\n # Close any already opened file if any\n self.close_file()\n\n # Disable cell change check to avoid crashes\n self.check_cell_change = False\n\n # Set the flag to no changes in current file state\n self.file_changed = False\n self.set_save_enabled(False)\n\n csv_file_path = QFileDialog.getOpenFileName(self, \"Load CSV File\", \"\", 'CSV(*.csv)')\n\n # Proceed if and only if a valid file is selected and the file dialog is not cancelled\n if csv_file_path[0]:\n # Get only the file name from path. eg. 'data_file.csv'\n filepath = os.path.normpath(csv_file_path[0])\n filename = filepath.split(os.sep)\n self.csv_file_name = filename[-1]\n\n self.loading_progress = QProgressDialog(\"Reading Rows. 
Please wait...\", None, 0, 100, self)\n self.loading_progress.setWindowTitle(\"Loading CSV File...\")\n self.loading_progress.setCancelButton(None)\n\n # enable custom window hint\n self.loading_progress.setWindowFlags(self.loading_progress.windowFlags() | QtCore.Qt.CustomizeWindowHint)\n # disable (but not hide) close button\n self.loading_progress.setWindowFlags(self.loading_progress.windowFlags() & ~QtCore.Qt.WindowCloseButtonHint)\n\n # Show waiting cursor till the time file is being processed\n QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)\n\n self.loading_worker = CsvLoaderWorker(csv_file_path=csv_file_path, csv_data_table=self.csv_data_table,\n column_headers=self.column_headers,\n column_headers_all=self.column_headers_all)\n\n self.loading_thread = QThread()\n # Set higher priority to the GUI Thread so UI remains a bit smoother\n QThread.currentThread().setPriority(QThread.HighPriority)\n\n self.loading_worker.moveToThread(self.loading_thread)\n self.loading_worker.workRequested.connect(self.loading_thread.start)\n self.loading_thread.started.connect(self.loading_worker.process_loading_file)\n self.loading_worker.finished.connect(self.on_loading_finish)\n\n self.loading_worker.relay.connect(self.update_loading_progress)\n self.loading_worker.progress_max.connect(self.set_maximum_progress_value)\n self.loading_worker.update_bottom_toolbar.connect(self.set_bottom_toolbar_info)\n\n self.loading_progress.setValue(0)\n self.loading_worker.request_work()\n\n self.check_cell_change = True\n\n # Close the start page tab and load the file tab\n self.tabWidget.removeTab(0)\n self.tabWidget.insertTab(1, self.csv_table_tab, \"Main Document\")\n\n # Enable Column Layout menu option\n self.action_column_layout.setEnabled(True)\n self.action_add_data.setEnabled(True)\n self.action_add_column.setEnabled(True)\n self.action_toolbar_add_data.setEnabled(True)\n self.action_close_file.setEnabled(True)", "def from_csv(cls, load_folder: Path) -> \"Parameters\":\n serializer = serializer_factory(fmt=SerializerEnum.CSV)\n return serializer.load(class_obj=cls, folder_path=load_folder)", "def read_csv(self, path):\n for file in os.listdir(path):\n if file[-4:] == \".csv\":\n name = file[:-4]\n table_index_header = cfg.get_list(\"table_index_header\", name)\n filename = os.path.join(path, file)\n self.input_data[name] = pd.read_csv(\n filename,\n index_col=list(range(int(table_index_header[0]))),\n header=list(range(int(table_index_header[1]))),\n squeeze=(\"series\" not in name),\n )\n self.check_input_data(warning=False)\n self.add_meta_data()\n return self", "def from_csv(self, csvfile, encoding='utf-8', dialect='excel', **kwds):\n if isinstance(csvfile, string_types):\n encoding = _normalize_decoder(encoding)\n reader, close_file = _from_csv_path(csvfile, encoding, dialect=dialect, **kwds)\n return Reader(reader, closefunc=close_file)\n\n reader = _from_csv_iterable(csvfile, encoding, dialect=dialect, **kwds)\n return Reader(reader)", "def load_data(self):\n if self.file_path is not None:\n if not os.path.isfile(self.file_path):\n raise FileNotFoundError('The file does not exist')\n\n with open(self.file_path, errors='ignore') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n # skipping header\n csv_reader.__next__()\n # process extraction\n [self.extract_data(record) for record in csv_reader]\n\n if not self.complaints_data:\n raise ValueError('No data to load')\n\n return self.complaints_data", "def read_csv(self):\n with open(self.csv_file, 'rU') as file_object:\n reader = 
csv.reader(file_object, delimiter=self.delimiter)\n if self.has_header_row:\n header_row = next(reader, None)\n if self.has_duplicate_column_names:\n header_counts_dict = dict()\n new_header_row = []\n for each_header in header_row:\n try:\n header_counts_dict[each_header] += 1\n except KeyError:\n header_counts_dict[each_header] = 1\n frequency = header_counts_dict[each_header]\n if frequency==1:\n new_header_row.append(each_header)\n else:\n new_header_row.append(each_header+str(frequency))\n header_row = new_header_row\n else:\n header_row = self.provided_headers_list\n rows = [\n { header: value for header, value in zip(header_row, next_row)}\n for next_row in reader\n ]\n return header_row, rows", "def read_csv(path):\n csv_data =[]\n \n with open(path, 'r') as csv_file:\n csv_read = csv.reader(csv_file, dialect='excel')\n for row in csv_read:\n csv_data.append(row)\n\n return(csv_data)", "def load_data_from_csv(csv_file, users_to_i = {}, items_to_i = {}):\n raw_data = []\n with open(csv_file) as f:\n csvreader = csv.reader(f)\n # skipping first row (header)\n next(csvreader)\n for user, item in csvreader:\n raw_data.append((user, item))\n return load_data_from_array(raw_data, users_to_i, items_to_i)", "def __init__(self, header=None, rows=None, fromfile=None, delimiter=','):\n\t\tif fromfile:\n\t\t\trs = Csv(fromfile,delimiter=delimiter).getRows()\t\n\t\t\tself.header=rs[0]\n\t\t\tself.data = [] #a list of dictionaries\n\t\t\tfor r in rs[1:]:\n\t\t\t\tacc= dict()\n\t\t\t\tfor h in self.header:\n\t\t\t\t\tacc[h]=r[self.header.index(h)]\t\n\t\t\t\tself.data.append(acc) \n\t\telif header:\n\t\t\tself.header = header\t\n\t\t\tself.data = []\n\t\t\tif rows:\n\t\t\t\tif isinstance(rows[0],dict):\n\t\t\t\t\tself.data = rows\n\t\t\t\telif isinstance(rows[0],list):\t\n\t\t\t\t\tfor r in rows:\n\t\t\t\t\t\tacc= dict()\n\t\t\t\t\t\tfor h in self.header:\n\t\t\t\t\t\t\tacc[h]=r[self.header.index(h)]\n\t\t\t\t\t\tself.data.append(acc) \n\t\telse:\n\t\t\tself.header = []\t\n\t\t\tself.data = []", "def load_data(self):\n df = pandas.read_csv(self.path)\n self.data_dict = df.to_dict(orient=\"list\")\n return self.data_dict", "def __init__(self, file_path=None, writer=None,\n output_encoding=\"utf-8\", input_encoding=\"utf-8\",\n try_encodings_hard=True, fallback_input_encodings=None,\n from_row=0, from_col=0, ignore_blank_rows=False,\n input_dialect=csv.excel):\n self.file_path = None\n self.output_encoding = output_encoding\n self.input_encoding = input_encoding\n\n # useful to know about this for any future work on encodings: https://docs.python.org/2.4/lib/standard-encodings.html\n if fallback_input_encodings is None and try_encodings_hard:\n fallback_input_encodings = [\"cp1252\", \"cp1251\", \"iso-8859-1\", \"iso-8859-2\", \"windows-1252\", \"windows-1251\", \"mac_roman\"]\n else:\n fallback_input_encodings = []\n self.fallback_input_encodings = fallback_input_encodings\n\n self.from_row = from_row\n self.from_col = from_col\n self.ignore_blank_rows = ignore_blank_rows\n self.input_dialect = input_dialect\n\n # Store the csv contents in a list of tuples, [ (column_header, [contents]) ]\n self.data = []\n\n # Get an open file object from the given file_path or file object\n if file_path is not None:\n if type(file_path) == file:\n self.file_path = file_path.name\n # NOTE: if you have passed in a file object, it MUST work - as in, it must be set to\n # read the right encoding, and everything. We will not try to parse it again if it\n # fails the first time. 
If it is closed, you will also need to be sure to set the input_encoding.\n # All round - better if you just give us the file path\n self.file_object = file_path\n if self.file_object.closed:\n self.file_object = codecs.open(self.file_object.name, 'r+b', encoding=self.input_encoding)\n\n # explicitly read this file in\n self._read_file(self.file_object)\n else:\n self.file_path = file_path\n if os.path.exists(file_path) and os.path.isfile(file_path):\n self._read_from_path(file_path)\n else:\n # If the file doesn't exist, create it.\n self.file_object = codecs.open(file_path, 'w+b', encoding=self.output_encoding)\n\n elif writer is not None:\n self.file_object = writer", "def read_csv(csv_file):\r\n with open(csv_file, \"r\") as files:\r\n data = csv.reader(files)\r\n return list(data)", "def load_input(self, number_of_rows_to_read):\n self.dataframe = pandas.read_csv(self.filename, nrows=number_of_rows_to_read)\n #self._describe_input_data()", "def load_from_file_csv(cls):\n try:\n with open(cls.__name__ + \".csv\", \"r\") as f:\n ld = []\n reader = csv.DictReader(f)\n for row in reader:\n for key, val in row.items():\n row[key] = int(val)\n ld.append(row)\n return [cls.create(**item) for item in ld]\n except FileNotFoundError:\n return []", "def read_csv(csv_file_path):\n res = [] #list\n # f = open(csv_file_path) #read file\n with open(csv_file_path,\"r\") as f:", "def load_csv(csv_path, params={}):\n csv_content = []\n with open(csv_path, encoding='utf-8') as fd:\n obj = csv.reader(fd, params)\n for line in obj:\n csv_content.append(line)\n\n return csv_content", "def __init__(self, csv_path):\r\n # Transforms\r\n self.to_tensor = transforms.ToTensor()\r\n # Read the csv file\r\n self.data_info = pd.read_csv(csv_path, header=None)\r\n # First column contains the image paths\r\n self.image_arr = np.asarray(self.data_info.iloc[:, 0])\r\n # Second column is the labels\r\n self.label_arr = [np.asarray(self.data_info.iloc[:, 1])]\r\n # Third column is for an operation indicator\r\n #self.operation_arr = np.asarray(self.data_info.iloc[:, 2])\r\n # Calculate len\r\n self.data_len = len(self.data_info.index)", "def from_csv(self, path_to_load):\n import pandas as pd\n\n df = pd.read_csv(path_to_load)\n df = df.loc[:, ~df.columns.str.contains('^Unnamed')] # Remove unnnamed\n\n self.results['cids'] = list()\n self.results['differences'] = list()\n self.results['experimental_values'] = list()\n\n pd_dict = df.to_dict()\n length = len(pd_dict['cids'])\n for cid in [pd_dict['cids'][i] for i in range(0, length)]:\n self._results['cids'].append(cid)\n for cid in [pd_dict['differences'][i] for i in range(0, length)]:\n self._results['differences'].append(cid)\n for cid in [pd_dict['experimental_values'][i]\n for i in range(0, length)]:\n self._results['experimental_values'].append(cid)", "def __init__(self, file_path):\n self.file_path = file_path\n self.fd = open(self.file_path)\n self.reader = csv.reader(self.fd, delimiter='\\t')\n # skip first line\n first_line = next(self.reader)\n self.headers = next(self.reader)", "def load_data_csv():\r\n \r\n # Load lookup table\r\n path = 'data/id_lookup.csv'\r\n lookup_table = pd.read_csv(path, index_col=0)\r\n\r\n # Load song data\r\n path2 = 'data/data_lyrics_features.csv'\r\n data = pd.read_csv(path2, index_col=0)\r\n\r\n return data, lookup_table", "def test_csv_reader_data_contents(process_data):\n data = process_data(file_name_or_type='clean_map.csv')\n\n # Check row types\n for row in data:\n assert(isinstance(row['Country'], str))\n 
assert(isinstance(row['City'], str))\n assert(isinstance(row['State_Or_Province'], str))\n assert(isinstance(row['Lat'], float))\n assert(isinstance(row['Long'], float))\n assert(isinstance(row['Altitude'], float))\n\n # Basic data checks\n assert len(data) == 180 # We have collected 180 rows\n assert data[0]['Country'] == 'Andorra'\n assert data[106]['Country'] == 'Japan'", "def __init__(self, csv_path, column_types=None, set_columns=False, file_headers=True, encoding=\"utf-8-sig\",\n missing_to_zero=False, print_warnings=True):\n\n self.file_path = Path(csv_path)\n self.file_name = self.file_path.stem\n\n self._file_headings = file_headers\n self._encoding = encoding\n\n self.headers = self._extract_headers()\n self.row_length = len(self.headers)\n\n self.missing_to_zero = missing_to_zero\n self.print_warnings = print_warnings\n self.invalid_typed = []\n\n self.column_types = self._determine_column_types(column_types)\n self.row_data, self.column_data, self.column_length = self._set_data(set_columns)\n\n # Old definitions kept for legacy, but new names added for clarity\n self.num_cols = self.row_length\n self.num_rows = self.column_length\n\n if len(self.invalid_typed) > 0 and self.print_warnings:\n print(f\"Warning: The following column-row-value-type where not correct so loaded as strings:\\n\"\n f\"{sorted(self.invalid_typed)}\")", "def loadCsv(self):\n # Close any already opened file if any\n self.close_file()\n\n # Disable cell change check to avoid crashes\n self.check_cell_change = False\n\n # Set the flag to no changes in current file state\n self.file_changed = False\n self.setSaveEnabled(False)\n\n csv_file_path = QFileDialog.getOpenFileName(self, \"Load CSV File\", \"\", 'CSV(*.csv)')\n\n # Proceed if and only if a valid file is selected and the file dialog is not cancelled\n if csv_file_path[0]:\n # Get only the file name from path. eg. 'data_file.csv'\n filepath = os.path.normpath(csv_file_path[0])\n filename = filepath.split(os.sep)\n self.csv_file_name = filename[-1]\n\n self.loading_progress = QProgressDialog(\"Reading Rows. 
Please wait...\", None, 0, 100, self)\n self.loading_progress.setWindowTitle(\"Loading CSV File...\")\n self.loading_progress.setCancelButton(None)\n\n # enable custom window hint\n self.loading_progress.setWindowFlags(self.loading_progress.windowFlags() | QtCore.Qt.CustomizeWindowHint)\n # disable (but not hide) close button\n self.loading_progress.setWindowFlags(self.loading_progress.windowFlags() & ~QtCore.Qt.WindowCloseButtonHint)\n\n # Show waiting cursor till the time file is being processed\n QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)\n\n self.loading_worker = loader.CsvLoaderWorker(csv_file_path=csv_file_path, csv_data_table=self.csv_data_table,\n column_headers=self.column_headers,\n column_headers_all=self.column_headers_all)\n\n self.loading_thread = QThread()\n # Set higher priority to the GUI Thread so UI remains a bit smoother\n QThread.currentThread().setPriority(QThread.HighPriority)\n\n self.loading_worker.moveToThread(self.loading_thread)\n self.loading_worker.workRequested.connect(self.loading_thread.start)\n self.loading_thread.started.connect(self.loading_worker.processLoadingFile)\n self.loading_worker.finished.connect(self.on_loading_finish)\n\n self.loading_worker.relay.connect(self.update_loading_progress)\n self.loading_worker.progress_max.connect(self.set_maximum_progress_value)\n self.loading_worker.update_bottom_toolbar.connect(self.setBottomToolbarInfo)\n\n self.loading_progress.setValue(0)\n self.loading_worker.requestWork()\n\n self.check_cell_change = True\n\n # Close the start page tab and load the file tab\n self.tabWidget.removeTab(0)\n self.tabWidget.insertTab(1, self.tableTab, \"Main Document\")\n\n # Enable Column Layout menu option\n self.action_column_layout.setEnabled(True)\n self.action_add_data.setEnabled(True)\n self.action_add_column.setEnabled(True)\n self.action_toolbar_add_data.setEnabled(True)\n self.action_close_file.setEnabled(True)", "def load_CSV_data(path):\n return np.genfromtxt(os.path.join('data/traffic_data', path))", "def mock_data_loader(csv_path):\n file_path = KINGDOM_CSV_PATH\n\n kingdomArr = []\n\n with open(file_path, newline=\"\") as csv_file:\n reader = csv.reader(csv_file, delimiter=\",\")\n for row in reader:\n kingdomArr.append(Kingdom(row[0], row[1]))\n\n return kingdomArr", "def prepare_CSV(self):\n self.drop_columns()\n self.rename_columns()\n self.spilt_columns()\n self.add_vehicle_id_column()\n self.add_source_column()\n self.add_timestamp_columns()\n self.get_colour_columns()\n self.clean_column_formats()\n\n # print(self.data.info())\n # print(self.data.sample(10))\n\n return self.data", "def _read_csv(self, input_file, quotechar=None):\n with codecs.open(input_file, \"r\", encoding=\"UTF-8\") as f:\n reader = csv.reader(f, delimiter=\",\", quotechar=quotechar)\n examples = []\n seq_id = 0\n header = next(reader) # skip header\n for line in reader:\n example = InputExample(\n guid=seq_id, label=line[0], text_a=line[1])\n seq_id += 1\n examples.append(example)\n return examples", "def set_input_csv(self):\n if len(self[\"input_csv\"]) > 1:\n raise Exception(\"You must only specify *one* unified CSV file!\")\n self.csv_path = self[\"input_csv\"][0]\n print(\"Using input file\", self.csv_path)", "def read_csv(\n type: CSVTypes,\n csv_file: UploadFile = File(...),\n db: Session = Depends(get_db),\n authorization: str = Header(None),\n settings: config.Settings = Depends(get_settings),\n):\n if authorization != settings.upload_secret:\n raise HTTPException(401, \"Operação inválida!\")\n\n lines = 0\n\n with 
csv_file.file as file:\n content = file.read()\n content = content.decode(\"utf-8\")\n content = content.split(\"\\n\")\n if type == CSVTypes.results:\n lines = len(import_results_csv(content, db))\n elif type == CSVTypes.templates_results:\n lines = len(import_templates_results_csv(content, db))\n elif type == CSVTypes.hospitals:\n lines = len(import_hospitals_csv(content, db))\n else:\n raise HTTPException(400)\n\n log(\"[CSV] CSV foi importado.\", db)\n\n return {\"lines\": lines}", "def __read_csv(self) -> tuple:\n with open(self.csv_file) as f:\n reader = csv.reader(f)\n for row in reader:\n if row[0].isspace():\n raise StopIteration\n yield row", "def get_raw_data_from_csv():\n data_df = pd.read_csv(static_constants.RAW_DATA_PATH)\n return data_df", "def readCSV(self):\n\n content = []\n with open(self.filename) as file:\n sn = csv.Sniffer()\n sn.preferred = [self.delimiter]\n try:\n dialect = sn.sniff(file.read(1024))\n except csv.Error:\n if not file.endswith(\"csv\"):\n self.delimiter = \"\\t\"\n file.seek(0)\n reader = csv.reader(file, delimiter=self.delimiter)\n dialect = reader.dialect\n file.seek(0)\n reader = csv.reader(file, dialect)\n rownr = 0\n\n for row in reader:\n\n if rownr == 0:\n header = row\n else:\n # print(row)\n content.append(row)\n rownr += 1\n\n file.close()\n\n return content.copy()", "def read_csv(path, number_of_header_lines=0):\n # if not os.path.isfile(path):\n try:\n return genfromtxt(path, delimiter=', ', skip_header=number_of_header_lines)\n except:\n raise ValueError(\"File does not exist!\", path)", "def read_csv(spark: SparkSession, config, log: Log4j):\n data = spark.read.schema(boston_schema).option(\"header\", \"true\").csv(config[\"boston_dataset_csv_path\"])\n log.warn(\"CSV Files Read Completed\")\n return data", "def read_csv(self, filepath, obs_vars = ['obs'], header = True):\n # determine if the type file is gzip\n filetype, encoding = mimetypes.guess_type(filepath)\n if encoding == 'gzip':\n self.data = pd.read_csv(filepath, compression='gzip')\n else:\n self.data = pd.read_csv(filepath)\n\n self.original_data = copy.deepcopy(self.data)\n if self.cutoff:\n self.data = self.data[:self.cutoff]\n \n self.data = self.data[obs_vars]\n self.N = self.data.shape[0]\n return True", "def load_csv(path: Path) -> Any:\n with open(path, newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n items = list(reader)\n return items", "def load_data(csv_path):\n df = pd.read_csv(csv_path)\n return df", "def __init__(self,csvrow):\n self.raw = csvrow\n data = csvrow.split(',')\n self.number = data[0]\n self.area = int(data[1])\n self.population = int(data[5])\n self.latitude = float(data[7])\n self.longitude = float(data[8])", "def read_csv(self, inputfile):\n d = csv.reader(inputfile)\n for row in d.read():\n self.translations[row[0]] = row[1]", "def _read_csv(cls, input_file, quotechar=None):\r\n with open(input_file, \"r\", encoding=\"utf-8\") as f:\r\n reader = csv.reader(f, delimiter=\",\", quotechar=quotechar)\r\n lines = []\r\n for line in reader:\r\n if sys.version_info[0] == 2:\r\n line = list(unicode(cell, 'utf-8') for cell in line)\r\n lines.append(line)\r\n return lines", "def read_csv(csv_path, fieldnames=None, restkey=None,\n restval=None, dialect='excel', *args, **kwds):\n with CSVFile(os.path.expanduser(csv_path), fieldnames=fieldnames, restkey=restkey, restval=restval,\n dialect=dialect, *args, **kwds) as csvfile:\n return csvfile", "def read_test_csv(self, file_path, header=True):\n BasePredictor.read_test_csv(self, file_path, 
header)\n self.obs = np.array(self.obs, dtype=np.int32)\n return", "def read_data(self) -> List[BankCSVRecord]:\n try:\n with open(self.source, \"r\") as csv_source:\n row_records = []\n reader = DictReader(csv_source)\n for row in reader:\n transformed_data = {\n \"timestamp\": datetime.strptime(\n row[\"date_readable\"], \"%d %b %Y\"\n ),\n \"trans_type\": row[\"type\"],\n \"amount\": int(row[\"euro\"]) + int(row[\"cents\"]) / 100,\n \"from\": row[\"from\"],\n \"to\": row[\"to\"],\n }\n row_records.append(BankCSVRecord(**transformed_data))\n return row_records\n except FileNotFoundError as e:\n raise ImporterSourceError(message=f\"File {self.source} not found\")\n except KeyError as e:\n raise ImporterSourceFormatError(\n message=\"Source file data does not match format\"\n )\n except Exception as e:\n raise ImporterError(message=\"Import failed!\") from e", "def import_csv(self, csvfileobject):\n # Clear previously stored info\n self._tracks = []\n self._selected = None\n\n for row in csvfileobject:\n if row[0] == \"T\":\n track = self.addTrack()\n track.properties = row\n elif row[0] == \"P\":\n period = self.addPeriod([0,1,'-'])\n period.properties = row", "def read_csvfile(inputfn):\n with open(inputfn, 'rU') as fd:\n datastruct = gen_csv_data(fd, returntype='list') # Make sure to store as list before closing file.\n return datastruct", "def read_data(self) -> List[BankCSVRecord]:\n try:\n with open(self.source, \"r\") as csv_source:\n row_records = []\n reader = DictReader(csv_source)\n for row in reader:\n transformed_data = {\n \"timestamp\": datetime.strptime(row[\"timestamp\"], \"%b %d %Y\"),\n \"trans_type\": row[\"type\"],\n \"amount\": row[\"amount\"],\n \"from\": row[\"from\"],\n \"to\": row[\"to\"],\n }\n row_records.append(BankCSVRecord(**transformed_data))\n return row_records\n except FileNotFoundError as e:\n raise ImporterSourceError(message=f\"File {self.source} not found\")\n except KeyError as e:\n raise ImporterSourceFormatError(\n message=\"Source file data does not match format\"\n )\n except Exception as e:\n raise ImporterError(message=\"Import failed!\") from e", "def __init__(self, in_csvfile, out_csvfile, col_name, cell_filler):\r\n self.in_csvfile = in_csvfile\r\n self.out_csvfile = out_csvfile\r\n self.col_name = col_name\r\n self.cell_filler = cell_filler", "def load_from_file_csv(cls):\n list_obj = []\n if os.path.exists(cls.__name__ + \".csv\"):\n with open(cls.__name__ + \".csv\", \"r\") as _file:\n str_csv = _file.read()\n _file.close()\n _dict = Base.from_json_string(str_csv)\n for obj in _dict:\n list_obj.append(cls.create(**obj))\n return(list_obj)", "def _enumerate_csv(self, csv_input):\n csv_file = open(csv_input, 'rb') \n csv_reader = csv.reader(csv_file)\n next(csv_reader, None)\n for row in reader:\n yield row", "def _load_csv(root_path, table_meta):\n relative_path = os.path.join(root_path, table_meta['path'])\n dtypes = _read_csv_dtypes(table_meta)\n\n data = pd.read_csv(relative_path, dtype=dtypes)\n data = _parse_dtypes(data, table_meta)\n\n return data", "def read_test_rf_csv():\n if os.path.exists(\"test_rf.csv\"):\n #print (\"--testing CSV imported\\n\")\n results = pd.read_csv(\"test_rf.csv\", index_col=0)\n else:\n print(\"log not found\")\n\n return results", "def _load(self, config: Dict):\n return pd.read_csv(config['path'])", "def reader(self):\n df = pd.read_csv(self.path)\n return df" ]
[ "0.7757678", "0.7520253", "0.74909085", "0.7298727", "0.72604567", "0.7150921", "0.7114749", "0.69202036", "0.69161856", "0.69122183", "0.68653274", "0.6864344", "0.6778561", "0.66752946", "0.6599824", "0.6594454", "0.6570179", "0.6554265", "0.6542523", "0.6483472", "0.6455281", "0.6403786", "0.6403786", "0.6402225", "0.6395308", "0.63694674", "0.6363399", "0.6356859", "0.634715", "0.63403815", "0.6328889", "0.6238282", "0.62237954", "0.62199754", "0.62027776", "0.61713845", "0.61557925", "0.6151217", "0.6132051", "0.6102119", "0.6095925", "0.60956717", "0.60945976", "0.60945976", "0.6094229", "0.6085062", "0.6083221", "0.60818505", "0.6077839", "0.6076509", "0.6066516", "0.6066102", "0.6062691", "0.6059886", "0.60545015", "0.60500145", "0.6049995", "0.6025193", "0.60243946", "0.6018217", "0.6014256", "0.6012317", "0.6010856", "0.6010112", "0.6009712", "0.6007132", "0.6005978", "0.59943855", "0.5989227", "0.5987429", "0.597933", "0.59765995", "0.5969041", "0.596413", "0.5959485", "0.59587306", "0.59563416", "0.59456486", "0.5943628", "0.59381855", "0.59319323", "0.59315646", "0.5923749", "0.59197724", "0.59188044", "0.5918093", "0.5904194", "0.58969903", "0.5895455", "0.5893794", "0.58892024", "0.58859557", "0.5883781", "0.587994", "0.58776826", "0.5874873", "0.58712256", "0.5871223", "0.586969", "0.5863361" ]
0.65762395
16
x_max = max(seq) x_min = min(seq) epilson = 1e6 new_seq = [10000 * (epilson + x - x_min)/(epilson + x_max - x_min) for x in seq]
def normalization(seq): new_seq = [6.3578286171 * x for x in seq] return new_seq
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def geo_seq(val, ratio, length):\n return [val * pow(ratio, i) for i in range(length)]", "def power_e(amount, start, stop, truncated, sequence):\n ratio = .5\n for x in range(start, amount):\n y = abs(round(ratio * math.exp(x)))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence", "def power_em1(amount, start, stop, truncated, sequence):\n ratio = .25\n for x in range(start, amount):\n y = abs(round(ratio * math.expm1(x)))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence", "def buildAbs(maxVal):\n return [5*i for i in range(floor(maxVal/5)+1)]", "def log(amount, start, stop, truncated, sequence):\n ratio = 10 ** (len(str(start)) + 1)\n for x in range(start, amount):\n # y = abs(round(math.log(x, 1)))\n y = abs(round(math.log1p(x) * ratio * 5))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence", "def squares(amount, start, stop, truncated, sequence):\n for x in range(start, amount):\n y = x * x\n if truncated and y >= stop:\n sequence.append(stop)\n else:\n sequence.append(y)\n return sequence", "def generate_eps(T_low, T_high, n_e, factor = 10):\n \n E_f = E_fermi (n_e)\n eps_min = E_f - factor * T_high\n eps_max = E_f + factor * T_high\n eps_step = T_low / factor\n \n return np.arange (eps_min, eps_max+eps_step, eps_step)", "def hyperbolic_sine(amount, start, stop, truncated, sequence):\n ratio = 1\n for x in range(start, amount):\n y = abs(round(ratio * math.sinh(x)))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence", "def get_range(min, max, intervals, log):\n if not log:\n min = float(min)\n max = float(max)\n difference = max-min\n step_size = difference/intervals\n output = [min + i*step_size for i in range(intervals+1)]\n return output\n else:\n from math import log10 as log\n log_min = log(min)\n log_max = log(max)\n log_difference = log_max - log_min\n step_size = log_difference/intervals\n output = [pow(10, log_min + i*step_size) for i in range(intervals+1)]\n return output", "def _gser(a, x, eps=3.e-7, itmax=700):\n if x == 0.0:\n return 0.0\n ap = a\n sum = 1. 
/ a\n delta = sum\n n = 1\n while n <= itmax:\n ap = ap + 1.\n delta = delta * x / ap\n sum = sum + delta\n if (abs(delta) < abs(sum) * eps):\n return (sum * np.exp(-x + a * np.log(x)))\n n = n + 1\n raise RuntimeError(\"Maximum iterations exceeded in gser\")", "def euler_scheme(f_function, initial_value, start_point=0, division=100, end_interval=1):\n result_points = [initial_value]\n h_len = 1 / division\n for step in range(division * (end_interval - start_point)):\n result_points.append(result_points[step] + h_len *\n f_function(start_point + step * h_len, result_points[step]))\n return np.array(result_points)", "def _Ep(self):\n return np.logspace(np.log10(self.Epmin.to('GeV').value),np.log10(self.Epmax.to('GeV').value),\n self.nEpd * (np.log10(self.Epmax/self.Epmin)))", "def inverse_hyperbolic_sine(amount, start, stop, truncated, sequence):\n ratio = (start + stop) / 5\n for x in range(start, amount):\n y = abs(round(ratio * math.asinh(x)))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence", "def eV(E):\n if np.max(E) < 100:\n return E * 1000\n else:\n return E", "def math_map_list(values, toMin=0, toMax=1):\n minValue = min(values)\n maxValue = max(values)\n delta = maxValue - minValue\n deltaTarget = toMax - toMin\n newValues = [toMin +(value-minValue)*deltaTarget/delta for value in values]\n return newValues", "def power(amount, start, stop, truncated, sequence):\n ratio = len(str(start)) + 1\n for x in range(start, amount):\n y = abs(round(ratio ** x))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence", "def seq_sqrt(xs):\n num_list = []\n for xs_split in xs:\n print(xs)\n xs_num = int(xs_split)\n print(xs_num)\n xs_squrt = math.sqrt(xs_num)\n print(xs_squrt)\n num_list.append(xs_squrt)\n return num_list", "def fraction(amount, start, stop, truncated, sequence):\n ratio = stop\n for x in range(start, amount):\n y = abs(round(ratio / (abs(x) + 1)))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence", "def xi(a):\n return xrange(len(a))", "def yield_spectral_range(self) -> Tuple[float, float, float]:\n return [min(self.x), max(self.x), len(self.x)]", "def softplus_list(x_):\n y_ = [np.log(1 + np.exp(-np.abs(x_[0]))) + np.maximum(x_[0], 0)]\n for i in range(1, len(x_)):\n if x_[i] is not []:\n y_ = y_ + [np.log(1 + np.exp(-np.abs(x_[i]))) + np.maximum(x_[i], 0)]\n return y_", "def sines(amount, start, stop, truncated, sequence):\n\n for x in range(start, amount):\n y = abs(round(stop * math.sin(x)))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence", "def genvals():\n vals = np.empty(200)\n vals[:50] = np.arange(50) / 50\n vals[50:100] = (50 - np.arange(50)) / 50\n vals[100:] = -vals[:100]\n return vals", "def desp_inicial(x): #Definición del desplazamiento inicial de la cuerda\r\n return np.exp(-1000*(x - longitud/2)**2)", "def EGWD_fg(f):\n A = 4.2e-47\n res = np.zeros((len(f)))\n for i,freq in enumerate(f): \n if freq >=3e-3:\n # strain \n res[i] = A * freq**(-7/3) * np.exp(-2*(freq/5e-2)**2) \n else:\n res[i] = np.NaN\n return np.array(res)", "def energy_to_lambda(energy_ev=[]):\n energy_mev = energy_ev * 1000\n lambda_array = np.sqrt(81.787 / energy_mev)\n return lambda_array", "def smoothed(sequence, step=1, 
start=0):\n next_index = start + 1\n last = len(sequence) \n new_sequence = []\n if not step:\n return sequence\n ratio_step = step + 1\n for item in sequence:\n new_sequence.append(item)\n if next_index < last:\n next_item = sequence[next_index]\n ratio = (item + next_item) / (step + 1)\n ratio = int(ratio)\n for x in range(step):\n value = (ratio * x) + item\n new_sequence.append(int(value))\n next_index = next_index + 1\n return new_sequence", "def regular(step, start=0.):\n\n def output(low, high):\n newstart = math.ceil((low - start)/step) * step + start\n return numpy.arange(newstart, high, step, dtype=numpy.float)\n output.func_name = \"regular(%g, start=%g)\" % (step, start)\n return output", "def normalization(x, x_min=-5.12, x_max=5.12):\n for i in range(len(x.vect)):\n x.vect[i] = x_min + x.vect[i]*(x_max-x_min)\n return x", "def define_intervals(self):\n i = 5 # a step of increment\n interval_sum = self.min_step\n interval_list = [self.min_step]\n while interval_sum < self.max_step:\n interval_sum += i\n interval_list.append(interval_sum)\n # interval_list.append(self.max_step)\n # print(\"Intervals\", interval_list)\n return interval_list", "def hypotes(amount, start, stop, truncated, sequence):\n ratio = (start + stop) / 10\n for x in range(start, amount):\n y = abs(round(ratio * math.hypot(x, start)))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence", "def scale_mag_1(x):\n return np.array([np.true_divide(ui, mag(x)) for ui in x])", "def mag(x):\n return np.sqrt(sum(i**2 for i in x))", "def softplus_inv_list(x_):\n y_ = x_\n for i in range(len(x_)):\n if x_[i] is not []:\n y_[i] = np.log(1-np.exp(-np.abs(x_[i]))) + np.maximum(x_[i], 0)\n return y_", "def normalise(values):\n max_value = max(values)\n min_value = min(values)\n factor = 32767.0 / max(max_value, abs(min_value))\n return (int(v * factor) for v in values)", "def normalize(list, max_value=1):\n maxi = max(list)\n mini = min(list)\n\n if maxi == mini or len(list) == 1:\n return list\n \n norm = []\n\n for item in list:\n new = max_value * ((item - mini) / (maxi - mini))\n norm.append(new)\n\n return norm", "def exponential_linspace_int(start, end, num, divisible_by=1):\n def _round(x):\n return int(np.round(x / divisible_by) * divisible_by)\n\n base = np.exp(np.log(end / start) / (num - 1))\n return [_round(start * base**i) for i in range(num)]", "def scale(x):\n min_x, max_x = numpy.min(x), numpy.max(x)\n if min_x != max_x:\n x = (x-min_x)/(max_x-min_x)\n else:\n # all the numbers are the same in x\n x = numpy.asarray([1/len(x) for i in range(len(x)) ])\n return x.tolist()", "def arr(N,f=\"exp\",fattore_sigma=20):\n lam=1/90\n delay=np.zeros(N)\n sigma=0\n\n\n #caso exp\n if f==\"exp\":\n delay=np.array(samp.sample_from_exp(lam/fattore_sigma,N))\n print(\"exp\")\n print(delay)\n\n if f==\"uni\":\n max_delay=fattore_sigma*np.sqrt(12)/lam\n delay=np.array(samp.sample_from_uniform(N))\n delay=delay*max_delay\n print(delay)\n\n\n #calolo arrival\n arrival=np.zeros(N)\n\n for i in range(N):\n arrival[i]=i/lam + int(delay[i])\n\n return arrival,delay", "def data_range(xs: List[float]) -> float:\n return max(xs) - min(xs)", "def _get_genome_amounts_exponential(probability, max_genome_amount):\n\t\tassert isinstance(probability, (int, float))\n\t\tassert 0 <= probability <= 1\n\t\tassert isinstance(max_genome_amount, int)\n\n\t\tfinal_amounts = []\n\t\twhile sum(final_amounts) < max_genome_amount:\n\t\t\tamount = 
np_random.geometric(probability)\n\t\t\tfinal_amounts.append(amount)\n\n\t\tfinal_amounts[-1] -= sum(final_amounts) - max_genome_amount\n\t\treturn final_amounts", "def positional_encoding(max_seq_len, dm):\n\n pos = np.arange(max_seq_len)[:, np.newaxis]\n i = 2 * (np.arange(dm)[np.newaxis, :]//2) / np.float32(dm)\n\n pev = pos / np.power(10000, i)\n\n # Applying SIN to odd indices\n pev[:, 0::2] = np.sin(pev[:, 0::2])\n\n # Applying COS to odd indices\n pev[:, 1::2] = np.cos(pev[:, 1::2])\n\n return pev", "def cubes(amount, start, stop, truncated, sequence):\n for x in range(start, amount):\n y = x ** 3\n if truncated and y >= stop:\n sequence.append(stop)\n else:\n sequence.append(y)\n return sequence", "def rng(x):\n\n\tm = 2**31 - 1\n\ta = 48271\n\tc = 0\n\treturn (a*x + c)%m", "def Cycle_Averaging_Factor(Uion,E):\n\treturn np.sqrt(3.0/np.pi) * np.sqrt(E) / (2*Uion)**0.75", "def dist_to_list(func, length, min=None, max=None):\n from scipy import inf\n from scipy.integrate import quad\n if min is None:\n min = -inf\n if max is None:\n max = inf\n total = quad(func, min, max)[0]\n step = float(total) / length\n return [intsolve(func, (0.5 + i) * step, min, max) for i in range(length)]", "def scale_to_start(x):\n x = (x + eps) / (x[0] + eps)\n return x", "def compute_support():\n # support_values = [i for i in range(-MAX_MARK_VALUE, MAX_MARK_VALUE + 1, 0.01)]\n\n return np.arange(-MAX_MARK_VALUE, MAX_MARK_VALUE + 1).tolist()", "def _generate_progression(prog_len, diff, start):\n return [start + diff * idx for idx in range(prog_len)]", "def initGD( X, N ):\n seq = np.ndarray(len(X), dtype=np.object)\n for i in range( len(X) ):\n a = np.floor(np.linspace(0,N-.00000001,len(X[i])))\n seq[i] = a\n return seq", "def epoching(raw, trig, size: int, offset: int = 0, standardize: bool = False):\n epochs = []\n for i, trig_idx in enumerate(trig):\n epoch_indices = range(trig_idx + offset,\n trig_idx + offset + size)\n epochs.append(raw[epoch_indices])\n\n return np.array(epochs)", "def ema(s, n):\r\n\r\n ema = []\r\n j = 1\r\n\r\n #get n sma first and calculate the next n period ema\r\n sma = sum(s[:n]) / n\r\n multiplier = 2 / float(1 + n)\r\n ema.append(sma)\r\n\r\n #EMA(current) = ( (Price(current) - EMA(prev) ) x Multiplier) + EMA(prev)\r\n ema.append(( (s[n] - sma) * multiplier) + sma)\r\n\r\n #now calculate the rest of the values\r\n for i in s[n+1:]:\r\n tmp = ( (i - ema[j]) * multiplier) + ema[j]\r\n j = j + 1\r\n ema.append(tmp)\r\n\r\n return ema", "def _getExponentialValues(self, arr):\r\n return [math.exp(val) for val in arr]", "def calc(x_list):\n\n y_list = [x**2 + 2*x + 1 for x in x_list]\n\n return y_list", "def unit(x):\n\tl = sum([i**2 for i in x])**0.5\n\treturn [xi/l for xi in x]", "def spectre_etrange(f):\n end = False\n while not end:\n try:\n line = f.readline().split()\n wavnew = [float(w) for w in line]\n wav = np.append(wav,wavnew)\n prevwav = wavnew[-1]\n except:\n end = True\n aflux = f.readlines()\n for line in aflux:\n line = re.sub('-10\\d', 'e-100', line)\n flux = np.append(flux, line.rstrip().split())\n \n wav, flux = np.array(wav), np.array(flux)\n return wav,flux", "def simpson(func, start, stop):\n return (func(start) + 4*func((start+stop)/2) + func(stop)) * (stop-start)/6", "def get_EG(vals):\n return get_tau_gap(vals)/2", "def normalize(sequence):\n return [_norm(s) for s in sequence]", "def running_mean(sequence: list):\n if not sequence:\n return []\n\n mean = []\n \"\"\"\n [1] = 1 / 1\n [1,2] = 3 / 2 \n [1,2,3] = 6 / 3\n \"\"\"\n for idx, num in 
enumerate(sequence):\n\n sum_total = sum(sequence[:(idx + 1)])\n result = sum_total / (idx + 1)\n\n mean.append(round(result, 2))\n\n return mean", "def f(x):\n n_particles = x.shape[0]\n j = [f_per_particle(x[i]) for i in range(n_particles)]\n #print(\"f j: \", j)\n return np.array(j)", "def gen_points(lo, hi, N):\n\treturn np.linspace(lo, hi, num=N)\n\t\n\t## a = np.array(range(0, N))\n\t## return lo + (a * (hi-lo)/float(N))", "def normalize(values):\n\n\tmax_value = float(max(map(abs, values)) or 1)\n\treturn [val / max_value for val in values]", "def mrange(start, end, steps=1):\n list = []\n i = start\n while i < end:\n list.append(i)\n i += steps\n return list", "def _precompute_xl(self, p: int) -> List[int]:\n res = [1]\n val = 1\n for _ in range(len(self._s)):\n val = (val * self.X) % p\n res.append(val)\n return res", "def test_generator_continuous():\n RANGE_MAX = 100\n prev_value = RANGE_MAX // 2\n for msg in it.islice(generate_msgs(0, RANGE_MAX), 0, 42):\n curr_value = Message.parse(msg).power\n assert curr_value - prev_value <= 1\n prev_value = curr_value", "def abscissae(self) -> List[float]:", "def eulers_method(f, y, dx, range):\n x = min(range)\n y_space = [y]\n x_space = [x]\n while x<=max(range):\n y += f(x, y)*dx\n x += dx\n x_space.append(x)\n y_space.append(y)\n return (x_space, y_space)", "def cosines(amount, start, stop, truncated, sequence):\n\n for x in range(start, amount):\n y = abs(round(stop * math.cos(x)))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence", "def delay_times_exp(min_t, max_t, t0, min_step, max_step, number_of_steps):\n delays = []\n \n after_t0 = np.logspace(np.log(min_t), np.log(t0), num = number_of_steps, endpoint = True, base = np.e)\n after_t0 = min_t + t0 - after_t0\n after_t0 = after_t0[::-1]\n after_t0 = after_t0[:-1]\n \n before_t0 = np.logspace(np.log(t0), np.log(max_t), num = number_of_steps / 2, endpoint = True, base = np.e)\n \n delays = np.concatenate([after_t0, before_t0])\n \n delays = delays.tolist()\n \n y = np.zeros_like(delays)\n \n plt.plot(delays, y, 'o')\n \n return delays", "def seq(min=0.0, max=None, inc=1.0, type=float,\n return_type='NumPyArray'):\n if max is None: # allow sequence(3) to be 0., 1., 2., 3.\n # take 1st arg as max, min as 0, and inc=1\n max = min; min = 0.0; inc = 1.0\n r = arange(min, max + inc/2.0, inc, type)\n if return_type == 'NumPyArray' or return_type == ndarray:\n return r\n elif return_type == 'list':\n return r.tolist()\n elif return_type == 'tuple':\n return tuple(r.tolist())", "def compute_fixed_moments(self, x):\n return [x]", "def test_sequence(self):\n self.assertEqual([1, -3, 9, -27, 81, -243],\n [x for x in GeometricProgression(6, 1, -3)])\n\n self.assertEqual([1, 1, 1, 1, 1],\n [x for x in GeometricProgression(5, 1, 1)])\n\n self.assertEqual([4, 40, 400, 4000, 40000],\n [x for x in GeometricProgression(5, 4, 10)])", "def __generate_fenotype(self):\n \n \"\"\"\n Equation:\n x = min + (((max - min) * genotype) / (2 ** genotype_length - 1))\n \"\"\"\n \n self.fenotype = self.__min_interval + \\\n (((self.__max_interval - self.__min_interval) * int(self.genotype, 2)) \\\n / (2 ** self.__individual_genotype_length - 1))", "def generation(x, g):\n return int(x/g)", "def calc_emission(self, emission, fingers):\n return math.sqrt(sum(item*item for item in emission[:-1])) / emission[-1]", "def relative_range(self):\n self.calculate_relative_mags()\n string = '{:.0f}-{:.0f}Hz: {:.5f}'\n s_ind = 
self.get_bin(self.s_freq)\n e_ind = self.get_bin(self.e_freq)\n lst = self.rel_mags[s_ind:e_ind+1]\n return sum(lst)/len(lst)", "def _get_genome_amounts_geometric_fix(num_real_genomes, max_genome_amount, geometric_probability=0.3):\n\t\tassert isinstance(num_real_genomes, int)\n\t\tassert isinstance(max_genome_amount, int)\n\n\t\tfinal_amounts = [1] * num_real_genomes\n\t\tindex = 0\n\t\twhile index < len(final_amounts):\n\t\t\tif sum(final_amounts) >= max_genome_amount:\n\t\t\t\tbreak\n\t\t\tfinal_amounts[index] += 1 + np_random.geometric(geometric_probability)\n\t\t\tindex += 1\n\n\t\tfinal_amounts[index-1] -= sum(final_amounts) - max_genome_amount\n\t\treturn final_amounts", "def create_omega_list(self, seqstring):\n lomega = [] # list of omega\n res1 = seqstring[0]\n if len(seqstring) < 100: # that means less than 99 peptide bonds\n temp = \"{0}{1}{2:02d}\" # template for formatting\n else:\n temp = \"{0}{1}{2:03d}\"\n for k, res2 in enumerate(seqstring[1:]):\n # using k+1, in order to make the naming consistent with that\n # produced by calc_omega.py in myg_tools/pybin.\n lomega.append(temp.format(res1, res2, k+1))\n res1 = res2 # moving forward\n return lomega", "def foo(X):\n ret = []\n for x in X:\n r = 2*math.sqrt(sum([n*n for n in x]));\n if r == 0:\n ret.append(0)\n else:\n ret.append(math.sin(r) / r);\n return ret", "def floating_point_generator():\n i = 0\n while True:\n yield str((i % 5) * 1.1)\n i += 1", "def sequence(side_length):\r\n index = side_length\r\n numbers = []\r\n tmp1 = (index -1 ) / 2\r\n #numbers.append([index, 3, 5, 7, 9])\r\n for i in range(tmp1):\r\n if i == 0:\r\n numbers.append([3, 3, 5, 7, 9])\r\n else:\r\n diff = (3+i*2) - 1\r\n tmp2 = numbers[i-1][4] + diff\r\n numbers.append([3+i*2, tmp2, tmp2+diff, tmp2+diff*2, tmp2+diff*3])\r\n return numbers", "def i0(x):\n return tt.switch(tt.lt(x, 5), 1 + x**2 / 4 + x**4 / 64 + x**6 / 2304 + x**8 / 147456\n + x**10 / 14745600 + x**12 / 2123366400,\n np.e**x / (2 * np.pi * x)**0.5 * (1 + 1 / (8 * x) + 9 / (128 * x**2) + 225 / (3072 * x**3)\n + 11025 / (98304 * x**4)))", "def single_run(steps_number):\n values = list()\n numerator = 0\n for i in trange(1, steps_number):\n\n numerator += generate_episode()\n\n values.append(numerator / i)\n\n return np.array(values)", "def axis_range ( xmin , xmax , delta = 0.05 , log = False ) :\n xmn = min ( xmin , xmax )\n xmx = max ( xmin , xmax )\n \n import math\n \n ## 1) special case\n if isequal ( xmn , xmx ) :\n return math.floor ( xmn - 0.1 ) , math.ceil ( xmx + 0.1 ) \n\n ## 2) special case\n if islong ( xmn - 0.5 ) and islong ( xmn + 0.5 ) :\n return math.floor ( xmn - 0.1 ) , math.ceil ( xmx + 0.1 ) \n\n d = xmx - xmn\n \n if 0 <= xmn < xmx :\n \n xmin = max ( 0 , xmn - delta * d )\n xmax = xmx + delta * d \n \n elif xmn < xmx <= 0 :\n \n xmin = xmn - delta * d \n xmax = max ( 0 , xmx + delta * d )\n \n elif xmn < 0 < xmx :\n \n xmin = ( 1 + delta ) * xmn \n xmax = ( 1 + delta ) * xmx\n \n else : \n \n xmin = xmn - delta * d \n xmax = xmx + delta * d \n\n N = 3\n \n a1 , b1 = frexp10 ( xmin )\n a2 , b2 = frexp10 ( xmax )\n\n b1 -= N \n b2 -= N \n \n xmin = math.floor ( a1 * ( 10**N ) ) * ( 10 ** b1 )\n xmax = math.ceil ( a2 * ( 10**N ) ) * ( 10 ** b2 )\n \n return xmin , xmax", "def softmax(x):\n e_x = np.exp(x - np.max(x))\n return (e_x / e_x.sum()).tolist()", "def em_phase(indivs):\r\n\t#get the start frequency using dirichlet distribution.\r\n\thyplo_collection=[]\r\n\tindiv_dict=defaultdict(list)\r\n\t# 
hyplo_dict=defaultdict(float)\r\n\tres=[]\r\n\tres_pairs=[]\r\n\tfor x in indivs:\r\n\t\tdecom=decompose_acurate(x)\r\n\t\tindiv_dict[x]+=decom\r\n\t\thyplo_collection+=list(itertools.chain.from_iterable(decom))\r\n\treturn em(indiv_dict, hyplo_collection)", "def denormalize(images, min_, max_):\n return [((i + 1) / 2 * (max_ - min_)) + min_ for i in images]", "def binaire(x,n):\n a,q = [],0\n \n for i in range(n):\n q = x%2\n x //=2\n a = [q] + a\n \n return(a)", "def exp(X):\n X = np.maximum(X,100)\n return np.exp(X)", "def exponential_interval(\n initial: float = 0.1,\n multiplier: float = 2,\n maximum: Optional[float] = None,\n minimum: Optional[float] = None,\n) -> Generator[float, None, None]:\n val = initial\n while True:\n if minimum is not None and val < minimum:\n yield minimum\n if maximum is not None and val > maximum:\n yield maximum\n else:\n yield val\n val *= multiplier", "def E_vs_length(Emax, Emin, wmax=90, wmin=10, Lmax=1000, Lmin=102.4, p=75,\r\n fmax=5.7e9, p1=database['K+'], p2=database['pi+'],\r\n p3=database['p+'], delta_p=1.6e-2, nE=10, nw=10, nL=10, ng=50,\r\n nl=50, nf=20, L_resolution=0.01, w_resolution=0.01, plot=True,\r\n set_freq=5.7e9, count_L=False, count_w=True, count_E=True):\r\n E_range = np.logspace(np.log10(Emin), np.log10(Emax), int(nE))\r\n plot_E, w, length, intensity = [], [], [], []\r\n for E in E_range:\r\n if count_E == True:\r\n if count_w == True:\r\n print(f'E = {round(E, -3)} MV/m')\r\n else:\r\n print(E)\r\n output = efficiency_vs_w(wmax, wmin, Lmax, Lmin, p, fmax, p1, p2, p3,\r\n E, delta_p, nw, nL, ng, nl, nf, L_resolution,\r\n w_resolution, False, set_freq, count_L, count_w)\r\n if output != None:\r\n plot_E.append(E*1e-6)\r\n w.append(output[0])\r\n length.append(output[1])\r\n intensity.append(output[2])\r\n file = open(f'Save_Data_{Emin}_{Emax}.txt','a')\r\n file.write(f'{[plot_E[-1], w[-1], length[-1], intensity[-1]]}\\n')\r\n file.close()\r\n if plot == True:\r\n fig = plt.figure(figsize=[10, 5])\r\n ax = fig.add_subplot(1, 1, 1)\r\n fig.subplots_adjust(right=0.75)\r\n line1, = ax.plot(plot_E, length, 'r', lw=2, label='Target Distance')\r\n ax2 = ax.twinx()\r\n line2, = ax2.plot(plot_E, intensity, 'g', lw=2, label='Intensity Required')\r\n ax3 = ax.twinx()\r\n ax3.spines['right'].set_position(('axes', 1.2))\r\n make_patch_spines_invisible(ax3)\r\n ax3.spines['right'].set_visible(True)\r\n line3, = ax3.plot(plot_E, w, 'b', lw=2, label='Collimator Width')\r\n ax.set_xlabel(r'Electric Field Strength / MVm$^{-1}$', fontsize=20)\r\n ax.set_xlim(np.min(plot_E), np.max(plot_E))\r\n ax.set_ylabel('Target Distance / m', fontsize=20, color=line1.get_color())\r\n ax2.set_ylabel(r'Intensity / I$_0$', fontsize=20, color=line2.get_color())\r\n ax3.set_ylabel('Collimator Width / mm', fontsize=20, color=line3.get_color())\r\n ax.tick_params(axis='y', colors=line1.get_color())\r\n ax2.tick_params(axis='y', colors=line2.get_color())\r\n ax3.tick_params(axis='y', colors=line3.get_color())\r\n lines = [line1, line2, line3]\r\n ax.legend(lines, [l.get_label() for l in lines], loc='upper center', fontsize=15)\r\n ax.set_xscale('log')\r\n ax.minorticks_on()\r\n ax2.minorticks_on()\r\n ax3.minorticks_on()\r\n ax.grid()\r\n plt.show()\r\n return [w, length, intensity]", "def extrema(self, embs, lens): # embs: [batch_size x seq_len x emb_size] lens: [batch_size]\n # Find minimum and maximum value for every dimension in predictions\n batch_size, seq_len, emb_size = embs.shape\n max_mask = np.zeros((batch_size, seq_len, emb_size), dtype=np.int)\n for i, length 
in enumerate(lens):\n max_mask[i, :length, :] = 1\n min_mask = 1 - max_mask\n seq_max = (embs * max_mask).max(1) # [batch_sz x emb_sz]\n seq_min = (embs + min_mask).min(1)\n # Find the maximum absolute value in min and max data\n comp_mask = seq_max >= np.abs(seq_min) # [batch_sz x emb_sz]\n # Add vectors for finding final sequence representation for predictions\n extrema_emb = seq_max * comp_mask + seq_min * np.logical_not(comp_mask)\n return extrema_emb", "def exponential_decay(mul, base, n, min=0.1):\n return np.vstack([mul * np.power(base, np.arange(n)), [min] * n]).max(0).tolist()", "def arange(start: float, stop: float, step: float = 1.0) -> list[float]:\n start = decimal.Decimal(str(start))\n stop = decimal.Decimal(str(stop))\n step = decimal.Decimal(str(step))\n if step <= 0:\n raise ValueError('Step must be >= 0')\n out = []\n current = start\n while current < stop:\n out.append(float(current))\n current += step\n return out", "def __init__(self,in_range,mean,std,npoints=200):\n x_p = np.linspace(in_range[0],in_range[1],npoints,endpoint=True)\n y_p = np.zeros(npoints)\n yc = 0\n stdinv = 1.0/std\n stdinvsq = stdinv**2\n normalc = stdinv*(1.0/np.sqrt(np.pi))\n for x in x_p:\n expon = -(x - mean)**2 * (0.5*stdinvsq)\n y = normalc * np.exp(expon)\n y_p[yc]=y\n yc+=1\n self.x = x_p\n self.y = y_p\n self.sigma = std\n self.mean = mean\n self._normconst = normalc\n self.upper = in_range[1]\n self.lower = in_range[0]\n self._dx = x_p[1]-x_p[0]\n self.npoints = npoints\n return", "def rangeLin(min, max, n):\n\n return np.arange( min, max, (max-min)/n )", "def M(f):\n return 1127 * numpy.log(1 + f/700.0)", "def normalise_modular_range(value, min, max):\n return numpy.mod(value-min, max-min)+min", "def scaleByMax(ldata):\n\tm = max(ldata)\n\treturn list(map(lambda e : e/m, ldata))" ]
[ "0.6177466", "0.61661714", "0.5975783", "0.5779353", "0.56950027", "0.56275076", "0.5592511", "0.5567296", "0.5561729", "0.54725146", "0.5414097", "0.5412501", "0.54114294", "0.53760564", "0.5344241", "0.5343695", "0.53412575", "0.53345144", "0.5287912", "0.5285122", "0.5279148", "0.5275514", "0.52423006", "0.52398574", "0.5227902", "0.52255356", "0.5225248", "0.51998794", "0.51989126", "0.5198228", "0.51938576", "0.51920617", "0.51918125", "0.51877993", "0.5176806", "0.5159756", "0.5158873", "0.51556903", "0.5128974", "0.5105651", "0.5100253", "0.5091754", "0.5089101", "0.5062776", "0.50576687", "0.50542223", "0.50511515", "0.5044317", "0.5043686", "0.5041388", "0.50393724", "0.50270355", "0.50256556", "0.50236285", "0.5021069", "0.50191444", "0.5009987", "0.4999635", "0.49930155", "0.49904594", "0.49899048", "0.49857974", "0.4984023", "0.49839103", "0.4982173", "0.4979075", "0.49751645", "0.49720788", "0.4969554", "0.49598935", "0.49559546", "0.4950022", "0.49480245", "0.49423802", "0.49351352", "0.49343455", "0.49276793", "0.4924987", "0.49249446", "0.49167997", "0.49153727", "0.49143657", "0.4906715", "0.49060252", "0.49050242", "0.49003127", "0.48987266", "0.48962194", "0.48961812", "0.48917368", "0.48819894", "0.48781833", "0.48781154", "0.48772413", "0.48742", "0.48730618", "0.48708558", "0.48632154", "0.48622715", "0.48606396" ]
0.6463649
0
Export flat list to file
def save_list(list_data, path, lineterminator='\n', encoding=None, mode='w'): with open(path, mode) as f: list_data = [item + lineterminator for item in list_data] if encoding is not None: list_data = [item.encode(encoding) for item in list_data] f.writelines(list_data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write(lst):\n # TODO", "def write_list(self):\n with open(self.path, 'w') as file:\n for i in map(self.addziros, range(1, int(str(1) + self.number_length * '0') + 1)):\n file.write(i + '\\n')\n file.close()", "def export(tako_list, filename):\n for tak in tako_list:\n tak = tak[0]\n l1 = [tak.ident, \"a\"]\n for gen in tak.genome.weightchr_a:\n l1.append(gen.ident)\n l1.append(gen.weight)\n l1.append(gen.mut_rate)\n l1.append(gen.dom)\n f = os.path.join(\"Data\", (filename[:-4] + \" gene data.csv\"))\n with open(f, 'a', newline=\"\") as csvfile:\n writ = csv.writer(csvfile)\n writ.writerow(l1)\n if len(tak.genome.weightchr_b) != 0:\n l2 = [tak.ident, \"b\"]\n for gen in tak.genome.weightchr_b:\n l2.append(gen.ident)\n l2.append(gen.weight)\n l2.append(gen.mut_rate)\n l2.append(gen.dom) \n writ.writerow(l2)", "def list_to_file(l, file_name):\r\n fw = open(file_name, 'w', encoding = 'utf-8')\r\n fw.write('\\n'.join(l))\r\n fw.close()", "def write_list_to_file(myList, filename):\r\n\r\n with open(filename, \"w\") as outfile:\r\n for entries in myList:\r\n outfile.write(entries)\r\n\t\t\t# add a return after each line\r\n outfile.write(\"\\n\")", "def save_list_to_file(the_list, filepath):\n with open(filepath, 'w') as file_handler:\n for item in the_list:\n file_handler.write(\"{}\\n\".format(item))", "def __writeToFile(self, filePath, lst): \n \n if not self.outDir is None: \n filePath = os.path.join(self.outDir, filePath) \n \n open(filePath,'a').writelines(lst)", "def gp_file(data,filename,output_dir='',order = [],head = False):\n f = open(output_dir + filename + '.csv', 'w')\n f.write(str(len(order)-1) + '\\n')\n write = csv.writer(f)\n write.writerows(manip.dic_to_list(data,order,head),)\n f.closed\n\n return None", "def SaveListFile(file,lst):\n\tlst = [str(i) +\"\\n\" for i in lst]\n\tif len(lst) == 0:\n\t\treturn\n\twith open(file,'w') as f:\n\t\tf.writelines(lst)\n\treturn lst", "def result_file(accession_list):\n with open(\"../accessions_list.txt\", 'w') as file:\n file.write(accession_list)", "def save_all_ne_as_list_to_txt(self):\n #write the output\n outfile = open(('ne_list_all_' + self.lang + '_' + self.method +\n '.txt'), 'w')\n for sublist in self.named_entity_list_total:\n for entry in sublist:\n outfile.write(entry[0]+'\\t'+entry[3]+'\\n')\n outfile.close()", "def exportList(self, list_id):\n params = {'LIST_ID' : list_id,\n 'EXPORT_TYPE' : 'ALL',\n 'EXPORT_FORMAT': 'CSV',\n 'FILE_ENCODING': 'utf-8'}\n xrequest = xml_str(self.buildRequestEtree('ExportList', params))\n xresults = self.request(xrequest)\n xpath = '/Envelope/Body/RESULT/FILE_PATH'\n return xresults.xpath(xpath)[0].text", "def export(self, fname):\n f = open(fname, 'w')\n for ue in self.ue_list:\n line_components = list()\n line_components.append(ue.expression)\n line_components.append(ue.meaning)\n print >>f, '\\t'.join(line_components).encode('utf-8')", "def persist_list_to_csv(liste, nom_fichier):\n with open(nom_fichier, 'w') as f:\n for elem in liste :\n f.write(\"{}\\n\".format(elem))", "def export_data(self):\n folder = os.path.dirname(self.filename[0])\n filename_ext = os.path.basename(self.filename[0])\n filename = os.path.splitext(filename_ext)[0] #get filename without extension\n\n path = folder + \"/\" + filename + \"_fit_results.txt\"\n if not os.path.exists(path):\n file = open(path, \"w+\")\n else:\n file = open(path, \"a+\")\n\n for i in range(len(self.data_list)):\n file.write(self.data_list[i] + \"\\n\\n\")\n\n self.data_list = []\n file.close()", "def save_list(lines, filename):\n 
data = '\\n'.join(lines)\n file = open(filename, 'w')\n file.write(data)\n file.close()", "def export_part_list(self, filetype='xlsx'):\n if filetype == 'csv':\n enum = 48649\n else:\n enum = 48642\n path = self.export_dir.joinpath(self.partcode).joinpath('part_list.xlsx')\n self.doc.Sheets(1).PartsLists(1).Export(str(path), enum)", "def save_list_of_list(data, path, lineterminator='\\n', encoding=None):\n with open(path, 'w') as f:\n writer = csv.writer(f, lineterminator=lineterminator)\n if encoding is not None:\n data = [[item.encoding(encoding) for item in items]\n for items in data]\n writer.writerows(data)", "def print_list_to_file(lines, path):\n create_dir_for_file(path)\n with open(path, 'w+') as f:\n for l in lines:\n print>>f, l", "def write_list(l, fname):\n thefile = open(fname, \"w\")\n for line in l:\n thefile.write(\"%s\\n\" % line)\n thefile.close()", "def to_list(self):\n self.error_throw('output')\n\n if self.rank_method == methods_of_ranking[3]: #'diversified_ranking'\n export_list = self.output_div('list')\n else:\n export_list = self.output('list')\n return export_list", "def write_into_file(name, liste):\n file = open(name, \"w\")\n for item in liste:\n file.write(item)\n file.write('\\n')\n file.close()", "def output_to_file(utilist, filepath=\"demo.csv\"):\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n with open(filepath, \"a\") as f:\n f.write(utilist + \"\\n\")", "def csv_file(data,output_dir,filename,order = [],head = True):\n with open(output_dir + filename + '.csv', 'w') as f:\n write = csv.writer(f)\n write.writerows(manip.dic_to_list(data,order,head),)\n return None", "def write_list_to_file(program, list_to_write):\n with open(program.split('.')[0] + \".output.json\", 'a+') as output_file:\n output_file.write(json.dumps(list_to_write, indent=3, sort_keys=False))", "def generate_csv(self, lista):\r\n\t\ts = ''\r\n\t\tsalida = self.get_rel_path() + \"/\" + \"tree_names.csv\"\r\n\t\tfor i in lista:\r\n\t\t\t#st = i[2].split('/')\r\n\t\t\t#newpath = os.path.join(i[1],st)\r\n\t\t\thash = str(i[0])\r\n\t\t\tname_path = str(i[1] + \"/\" + i[2])\r\n\t\t\t#s = s + str(i[0]) + \";\" + i[1] + \"/\" + i[2] + \"\\n\"\r\n\t\t\tself.copy_file(hash,name_path)\r\n\t\t\ts = s + str(hash + \";\" + name_path + \"\\n\")\r\n\r\n\t\tf = open(salida,\"w\")\r\n\t\tf.write(s)\r\n\t\treturn salida", "def _toFile(self):\n pass", "def write_list_file(output_file, clip_list_arr):\n list_file = output_file+'_clip_list.txt'\n print \"list_file: \", list_file\n f = open(list_file, 'w')\n for clip in clip_list_arr:\n line = 'file '+clip\n f.write(\"%s\\n\" % line)\n # Add in a divider movie between clips? (it could go here)\n f.close()\n # print 'list_file', list_file\n # print clip_list_arr\n\n return list_file", "def write_list(outputfilename, list):\r\n try:\r\n with open(outputfilename, 'w', newline='', encoding='utf-8') as outfile:\r\n itemwriter = csv.writer(outfile, delimiter=\",\")\r\n for item in list:\r\n itemwriter.writerow(item)\r\n except:\r\n input(\"File still open! 
Please close and press enter to continue\")\r\n with open(outputfilename, 'w', newline='', encoding='utf-8') as outfile:\r\n itemwriter = csv.writer(outfile, delimiter=\",\")\r\n for item in list:\r\n itemwriter.writerow(item)", "def write_list(args, file_list):\n if not args.listfile.endswith(\".txt\"):\n args.listfile += \".txt\"\n outputfile = open(args.listfile, 'w')\n for name in file_list:\n outputfile.write(name)\n outputfile.write(\"\\n\")\n outputfile.close()", "def save_list_to_file(content: list, dst_path: str, append=False) -> None:\n with io.open(file=dst_path, mode=\"a\" if append else \"w\", encoding='utf-8') as destination_file:\n for element in content:\n destination_file.write(element + \"\\n\")", "def writeChronListToFile(self):\n ## write header\n for header_line in self.outData['header']:\n self.outFile.write(header_line + '\\n')\n ##loop through each msg list\n for msg_list in self.outData_temp:\n ## create line\n msg_line = reconstructLine(msg_list)\n ## write to file\n self.outFile.write(msg_line + '\\n')", "def create_data_file_from_list(lst, out_filename, dtype, shape):\n with open(out_filename, 'wb+') as out_file:\n out_file = open(out_filename, 'wb+')\n dat_file = np.memmap(out_file, dtype=dtype, shape=shape)\n dat_file[:] = lst[:]\n dat_file.flush()\n size = float(dat_file.nbytes) / (1024 ** 2)\n print('written %s : %.3f MB' % (out_filename, size))", "def outputFunc(filename, resultList):\n #assert len(parks) == 3\n \n f = open(filename, 'wt')\n \n try:\n writer = csv.writer(f)\n for i in range(len(resultList)):\n print resultList[0]\n writer.writerow(resultList[i])\n \n finally:\n f.close()", "def writeList2File(filename, array, overwrite=False, separator=';'):\n mode = 'a'\n if overwrite:\n mode = 'w'\n file = open(filename, mode)\n file.write(separator.join(map(str,array)) + '\\n')", "def export_file(self):\n if self.args.keyfilter:\n self.filter_keys()\n if self.args.datafilter:\n self.filter_values()\n json.dump(self.outputdata, self.outfile, indent=self.args.indent)\n self.outfile.write('\\n')", "def export_data(fp, app_name):\n from otree.views.admin import get_display_table_rows\n colnames, rows = get_display_table_rows(\n app_name, for_export=True, subsession_pk=None)\n colnames = ['{}.{}'.format(k, v) for k, v in colnames]\n writer = csv.writer(fp)\n writer.writerows([colnames])\n writer.writerows(rows)", "def export(fileprefix, hedges):\n with open(fileprefix + '.txt', 'w') as f:\n for h in hedges:\n s = \"\"\n for node in h[0]: #each node in the tail\n s += str(node) + \"|\"\n s = s[:-1]\n s += '\\t'\n for node in h[1]: #each node in the head\n s += str(node) + \"|\"\n s = s[:-1]\n s += '\\t'\n s += '1' + '\\n' #assigns weight for the hedge, currently always set to 1\n f.write(s)", "def list_to_file(itemlist, filename):\n # Create dir if needed\n dir_path = os.path.dirname(filename)\n if not os.path.isdir(dir_path):\n os.makedirs(dir_path)\n\n # Delete existing file\n if os.path.exists(filename):\n os.remove(filename)\n\n # itemlist.sort()\n\n # Write new file\n with open(filename, 'w') as f:\n fname = os.path.basename(filename)\n if 'local' in fname:\n f.write('# Local development dependencies go here\\n-r base.txt\\n\\n')\n if 'production' in fname:\n f.write('# Pro-tip: Try not to put anything here. Avoid dependencies in production that aren\\'t in development.\\n-r base.txt\\n\\n')\n if 'test' in fname:\n f.write('# Test dependencies go here.\\n-r base.txt\\n\\n')\n if 'subdependencies' in fname:\n f.write('# Sub-dependencies (i.e. 
most likely dependencies of top level dependencies).\\n-r base.txt\\n\\n')\n for item in itemlist:\n f.write('%s\\n' % item)", "def list_to_file(sorted_list, filename):\n doc = Document()\n table = doc.add_table(rows=1, cols=2)\n hdr_cells = table.rows[0].cells\n hdr_cells[0].text = 'Word'\n hdr_cells[1].text = 'Occurrence'\n\n for key, value in sorted_list:\n row_cells = table.add_row().cells\n row_cells[0].text = key\n row_cells[1].text = str(value)\n\n doc.save(\"sorted - \" + filename)", "def write_list_to_file(input_list, output_folder, delimiter=\" \", header=None):\n with open(output_folder, 'w') as doc_out:\n if header:\n doc_out.write(delimiter.join(header) + \"\\n\")\n for element in input_list:\n doc_out.write(delimiter.join([str(i) for i in element]) + \"\\n\")", "def write_groups2file(filename_basis, groups, pneumonia_entity_list):\n for i, indices in enumerate(groups):\n group_entities = list(np.array(pneumonia_entity_list)[indices])\n write2json(filename_basis + str(i) + '.json', group_entities)", "def write_list_to_file(file_name: str, list_name: List[str]):\n # Write to a file, overwriting the old contents\n file = open(file_name, 'w')\n\n # Loop through the list, append a newline character to each line\n for item in list_name:\n file.writelines(item + '\\n')\n\n # Close the file\n file.close()", "def outTxt(data, outPath, fileName):\n\n with open(outPath+fileName, \"wb\") as f:\n f.write(\"index,link,name,rating,review,price,category,neighborhood,address,phone,feedback\\n\")\n for record in data:\n f.write(\"%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\\n\" % \\\n (record[0],record[1],record[2],record[3],record[4],record[5],record[6],\\\n record[7],record[8],record[9],record[10]))", "def save_to_file(cls, list_objs):\n the_list = []\n if list_objs is not None:\n for stuff in list_objs:\n new_stuff = stuff.to_dictionary()\n the_list.append(new_stuff)\n the_list = Base.to_json_string(the_list)\n with open(\"{}.json\".format(cls.__name__), mode='w') as f:\n f.write(str(the_list))", "def savealist(alist, filename):\n out = open(filename, \"w\")\n for i in alist:\n out.write(str(i) + \"\\n\") # if i is numeric\n out.close()", "def writeAlltoFile(self):\n with open(self._fname, 'w') as f:\n for elem in self.getAll():\n line = self._writeGratoLine(elem)\n f.write(line + \"\\n\")\n f.close()", "def exportFoldFile(vectors, authors, fileName):\n with open(fileName, \"w\") as fFile:\n for idv, vec in enumerate(vectors):\n [fFile.write(str(val)+',') for val in vec]\n fFile.write(authors[idv] + '\\n')", "def export_samfile(self):", "def write_listing(listing, dep, pom):\n print(f'| {dep.groupId} | {dep.artifactId} | {dep.version} ', end='', file=listing)\n print(f'| {pom.authors} ', end='', file=listing)\n print(f'| {pom.license.name} | {pom.license.url} ', end='', file=listing)\n print(f'|', file=listing)", "def generate_ip_list_file():\n gip, mask = get_network_info()\n ips = ipaddress.IPv4Network(f\"{gip}/{mask}\")\n\n file_output = \"\\n\".join([str(ip) for ip in ips])\n\n with open(IP_LIST_PATH, 'w') as f:\n f.write(file_output)", "def save_data_to_file(file_name, list_of_product_objects):\r\n try:\r\n objF = open(file_name, \"w\")\r\n for row in list_of_product_objects:\r\n objF.write(str(row[0]) + \",\" + str(row[1]) + \"\\n\")\r\n objF.close()\r\n except IOError:\r\n print(\"Unable to locate file\")", "def save_to_file_csv(cls, list_objs):\n l = []\n if list_objs is not None:\n for item in list_objs:\n l.append(item.to_dictionary())\n with open(\"%s.csv\" % cls.__name__, mode='w') as f:\n 
f.write(Base.to_json_string(l))", "def writeStrListToFile(ldata, filePath, delem=\",\"):\n\twith open(filePath, \"w\") as fh:\n\t\tfor r in ldata:\n\t\t\tif type(r) == list:\n\t\t\t\tr = delem.join(r)\n\t\t\tfh.write(r + \"\\n\")", "def create_output_file(arr):\r\n for i in arr:\r\n output_file.write(f'{i[0]}\\t{i[1]}\\n')", "def save_list(filename:str, seg_sents:List[List[str]]):\n\twith open(filename, 'w', encoding=\"utf-8\") as f:\n\t\tfor sent in seg_sents:\n\t\t\tsentence = \" \".join(sent)\n\t\t\t# print(sentence)\n\t\t\tf.write(sentence + '\\n')", "def experiment_list(obj, outfile):\n warnings.warn(\n \"use .as_file() on the experimentlist directly\",\n DeprecationWarning,\n stacklevel=2,\n )\n\n obj.as_file(outfile)", "def save_to_file(cls, list_objs):\n li = []\n with open(cls.__name__ + \".json\", mode=\"w\") as fl:\n if list_objs is None:\n fl.write(Base.to_json_string(list_objs))\n return\n for i in list_objs:\n li.append(i.to_dictionary())\n fl.write(Base.to_json_string(li))", "def dump_data(data,file_name=default_file_name):\n fille = abs_path(file_name)\n create_file(fille)\n dictt = extract_data()\n rank = 1\n for item in data:\n info = create_info_dict(rank,item)\n # info = create_info_list(rank,item)\n dictt['Students'].append(info)\n rank +=1\n write_data(dictt)", "def save_to_file(cls, list_objs):\n my_list = []\n if list_objs or list_objs is not None:\n my_list = [obj.to_dictionary() for obj in list_objs]\n with open(cls.__name__ + '.json', 'w+') as f:\n f.write(cls.to_json_string(my_list))", "def save_to_file(cls, list_objs):\n filename = cls.__name__ + \".json\"\n new_list = []\n with open(filename, \"w\") as fp:\n if list_objs is None:\n fp.write(\"[]\")\n else:\n for objs in list_objs:\n new_list.append(cls.to_dictionary(objs))\n fp.write(cls.to_json_string(new_list))", "def write_data_file(output_file: str, companies: list):\n with open(output_file, \"w\") as f:\n # s = \"\\n\".join(companies)\n for i in range(len(companies)):\n for k in range(10):\n for j in range(len(companies[i].data[k])):\n s = f\"{i},{companies[i].data[k][j][0].__str__()},{companies[i].data[k][j][1]}\\n\"\n f.write(s)", "def write_the_contents_to_the_same_file(self):\n if not len(self.student_list):\n print('There is no contents to write')\n return\n\n if self._filename is None:\n self._filename = self.input_filename()\n\n with open(self._filename, 'w') as OUT:\n OUT.write(self.student_list.to_csv(date_format='%Y-%m-%d',\n sep='\\t', header=False, columns=self.columns_to_save))\n print(f'Data are saved into {self._filename!r}')", "def save_to_file(cls, list_objs):\n namefile = cls.__name__ + \".json\"\n rep_list = []\n if list_objs is not None and list_objs != []:\n for item in list_objs:\n repre = cls.to_dictionary(item)\n # rep_list.append(cls.to_json_string(repre))\n rep_list.append(repre)\n\n with open(namefile, \"w\", encoding=\"UTF-8\") as f:\n # json.dump(rep_list, f)\n f.write(cls.to_json_string(rep_list))", "def save_to_file(cls, list_objs):\n filename = cls.__name__ + \".json\"\n with open(filename, \"w\") as f:\n if list_objs is None:\n f.write(\"[]\")\n else:\n d = [x.to_dictionary() for x in list_objs]\n f.write(Base.to_json_string(d))", "def to_file(self, f: str) -> None:\n with open(f, \"w\") as open_file:\n open_file.write(\"\\n\".join(self.itos) + \"\\n\")", "def writeCSV(path,aList):\n\twith open(path,'wb') as w:\n\t\ta = csv.writer(w, delimiter = ',')\n\t\ta.writerows(aList)\n\tw.close()", "def write_list_to_file(ls, save_path):\n # Open in appendation mode given that this 
function may be called multiple\n # times on the same file (positive and negative sentiment are in separate\n # directories).\n out_file = open(save_path, \"w+\")\n for example in ls:\n out_file.write(example)\n out_file.write('\\n')", "def generate_filelist(FILELIST):\n # Ensure output file exists\n call(['touch', FILELIST])\n\n with open(FILELIST, 'a') as fw:\n for df in DataFiles.objects.all():\n fw.writelines([df.archive_path, \"\\n\"])", "def save2file(lis, path):\r\n np.save(path, np.array(lis))", "def iGetList(sess, iPaths, destFolder):\n ensure_dir(destFolder)\n print \"Write to: \", destFolder\n for iPath in iPaths:\n buff = sess.data_objects.open(iPath, 'r').read()\n with open(destFolder+'/'+os.path.basename(iPath), 'wb') as f:\n f.write(buff)", "def formata_listagem(self, listagem, diretorio=None):\r\n listagem = sorted(listagem, key=lambda k: int(k['docnumero'])) \r\n if not diretorio:\r\n diretorio=\"/u1/caixa/dev/tty1/listagem\"\r\n with open(diretorio,\"w\") as file:\r\n for oc in listagem:\r\n\t\tif int(oc[\"docsitcodigo\"]) == 999: # Sequencia nao utilizadas\r\n\t\t line = \"{:25s} {:0>3d} {:0>8d} {:44s} {:3d} \\n\".format(\" \", \r\n\t\t int(oc[\"docserie\"]), int(oc[\"docnumero\"]), oc[\"descricao\"],\r\n\t\t\t int(oc[\"docsitcodigo\"])) \r\n else:\r\n\t\t line = \"{} {:0>3d} {:0>8d} {} {:3d} {:7.2f}\\n\".format(oc[\"dhrecbto\"], \r\n\t\t int(oc[\"docserie\"]), int(oc[\"docnumero\"]), oc[\"docchaacesso\"],\r\n\t\t\t int(oc[\"docsitcodigo\"]), float(oc[\"docvlrtotal\"])) \r\n\t file.write(line)", "def save_to_file(cls, list_objs):\n l = []\n if list_objs is not None:\n for item in list_objs:\n l.append(item.to_dictionary())\n with open(\"%s.json\" % cls.__name__, mode='w') as f:\n f.write(Base.to_json_string(l))", "def afisare_filme(self,list):\n with open(\"filme.txt\", 'r') as f: # modificam si in fisier\n lines = f.readlines()\n for line in lines:\n line_sep = line.split('/')\n list.append(line_sep[0]+\" \"+line_sep[1]+\" \"+line_sep[2]+\" \"+str(line_sep[3])+\" \"+line_sep[4]+\"\\n\")\n return list", "def write_kpi_indices(dst_file):\n global kpi_list\n with open(dst_file, 'w') as f:\n for kpi in kpi_list:\n f.write(kpi.desc() + '\\n')", "def export_list_to_xacro(list, filename):\n global robot, OUTPUT\n doc = Document()\n root = doc.createElement('robot')\n doc.appendChild(root)\n root.setAttribute(\"xmlns:xacro\", \"http://www.ros.org/wiki/xacro\")\n print ('exporting ' + os.path.basename(filename))\n for string in list:\n for link in robot.links:\n if robot.links[link].name.find(string) != -1:\n root.appendChild(robot.links[link].to_xml(doc))\n for joint in robot.joints:\n if robot.joints[joint].child == robot.links[link].name:\n root.appendChild(robot.joints[joint].to_xml(doc))\n write_comments_in_xacro(doc, filename)", "def dump(self):\n return []", "def save_data_to_file(file_name, list_of_product_objects):\r\n objfile = open(file_name, 'w')\r\n for row in list_of_product_objects:\r\n objfile.write(row.product_name + \",\" + str(row.product_price) + \"\\n\")\r\n objfile.close()", "def write_csv(list_file, path):\n\n\twith open(path, 'w') as f:\n\t\twriter = csv.writer(f, delimiter=',')\n\t\tfor i in list_file:\n\t\t\twriter.writerow(i)", "def writeQrels(qrelList, fileName):\n with open(fileName, 'w') as f:\n for e in qrelList:\n f.write(qrelEntry2Str(e))\n f.write('\\n')", "def csv_save_list(list_data, path, lineterminator='\\n', encoding=None):\n with open(path, 'w') as f:\n writer = csv.writer(f, lineterminator=lineterminator)\n for item in list_data:\n if 
encoding is not None:\n writer.writerow([item.encode(encoding)])\n else:\n writer.writerow([item])", "def SaveList(list_variable, strfile, separator=','):\n\n robomath.Mat(list_variable).tr().SaveMat(strfile, separator)", "def save_to_file(cls, list_objs):\n filen = cls.__name__ + \".json\"\n\n list = []\n if list_objs is not None:\n for i in list_objs:\n i = i.to_dictionary()\n json_dict = json.loads(cls.to_json_string(i))\n list.append(json_dict)\n\n with open(filen, \"w\") as fd:\n json.dump(list, fd)", "def OutputList(self):\n\n if hasattr(self,'fp'):\n fp = self.fp\n else:\n fp = 999\n\n if hasattr(self,'Vdot_ratio'):\n Vdot_ratio = self.Vdot_ratio\n else:\n Vdot_ratio = 1 \n \n return [\n ('M1','-',self.M[0]),\n ('M2','-',self.M[1]),\n ('M3','-',self.M[2]),\n ('M4','-',self.M[3]),\n ('M5','-',self.M[4]),\n ('M6','-',self.M[5]),\n ('M7','-',self.M[6]),\n ('M8','-',self.M[7]),\n ('M9','-',self.M[8]),\n ('M10','-',self.M[9]),\n ('P1','-',self.P[0]),\n ('P2','-',self.P[1]),\n ('P3','-',self.P[2]),\n ('P4','-',self.P[3]),\n ('P5','-',self.P[4]),\n ('P6','-',self.P[5]),\n ('P7','-',self.P[6]),\n ('P8','-',self.P[7]),\n ('P9','-',self.P[8]),\n ('P10','-',self.P[9]),\n ('Heat Loss Fraction','-',fp),\n ('Displacement scale factor','-',Vdot_ratio),\n ('Power','W',self.W),\n ('Mass flow rate','kg/s',self.mdot_r),\n ('Inlet Temperature','K',self.Tin_r),\n ('Inlet Pressure','kPa',self.pin_r),\n ('Outlet Temperature','K',self.Tout_r),\n ('Outlet Pressure','kPa',self.pout_r),\n ('Inlet Enthalpy','J/kg',self.hin_r),\n ('Outlet Enthalpy','J/kg',self.hout_r),\n ('Overall isentropic efficiency','-',self.eta_oi),\n ('Pumped flow rate','m^3/s',self.Vdot_pumped)\n ]", "def List_to_CSV(OutFname, DataList):\n with open(OutFname, 'w') as myfile:\n wr = csv.writer(myfile, delimiter=',')\n wr.writerows(line for line in DataList)", "def csvwrite(inlist, stringify=False):\n out_list = []\n for entry in inlist:\n if stringify:\n new_entry = []\n for val in entry:\n if not isinstance(val, basestring):\n val = str(val)\n new_entry.append(val)\n entry = new_entry\n this_line = ', '.join([elem_quote(val) for val in entry])\n out_list.append(this_line)\n return out_list", "def save_to_file(cls, list_objs):\n filename = cls.__name__ + \".json\"\n result = []\n if list_objs:\n for objs in list_objs:\n dictionary = objs.to_dictionary()\n result.append(dictionary)\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n file.write(cls.to_json_string(result))", "def toList(filename):\n pt_portname = Word(alphanums+'_')\n pt_portname_bus = Word(alphanums+\"_[]*\")\n\n pt_get_ports = Suppress('[') + Keyword('get_ports') + \\\n Suppress('{') + (pt_portname_bus|pt_portname) + \\\n Suppress('}') + Suppress(']')\n pt_get_cells = Suppress('[') + Keyword('get_cells') + \\\n Suppress(restOfLine) #ignore\n\n pt_set_property = Keyword('set_property') + Word(alphanums+'_') + \\\n Word(alphanums+'_') + (pt_get_ports|pt_get_cells)\n\n pt_continuation = ('\\\\' + LineEnd()).suppress()\n pt_set_other = Literal('set_') + Suppress(restOfLine) + Optional(pt_continuation)\n\n parser = ZeroOrMore (Group (pt_set_property | pt_set_other) ) \n\n parser.ignore('#' + restOfLine)\n\n result_list = parser.parseFile(filename)\n\n return result_list", "def write_text_file(data, file_name):\n\timport types\n\toutf = open(file_name, \"w\")\n\tif (type(data[0]) == types.ListType):\n\t\t# It is a list of lists\n\t\tfor i in xrange(len(data[0])):\n\t\t\tfor j in xrange(len(data)):\n\t\t\t\tif type(data[j][i]) == type(0):\n\t\t\t\t\toutf.write(\" 
%12d\"%data[j][i])\n\t\t\t\telse:\n\t\t\t\t\toutf.write(\" %12.5g\"%data[j][i])\n\t\t\toutf.write(\"\\n\")\n\telse:\n\t\t# Single list\n\t\tfor j in xrange(len(data)):\n\t\t\tif type(data[j]) == type(0):\n\t\t\t\toutf.write(\" %12d\\n\"%data[j])\n\t\t\telse:\n\t\t\t\toutf.write(\" %12.5g\\n\"%data[j])\n\toutf.close()", "def getList(self):\n return self.position.exportToList()", "def generate_csv(lists, output_file):\n if os.path.isfile(output_file):\n with open(output_file, 'a') as file:\n dataset = tablib.Dataset()\n for l in lists:\n dataset.append([l['Original ASIN'], l['Associated ASIN'], l['Title'], l['Price'], l['Currency Code'], l['Relationship']])\n file.write(dataset.csv)\n else:\n with open(output_file, 'w+') as fp:\n dataset = tablib.Dataset(headers=['Original ASIN', 'Associated ASIN', 'Title', 'Price', 'Currency Code', 'Relationship'])\n for l in lists:\n dataset.append([l['Original ASIN'], l['Associated ASIN'], l['Title'], l['Price'], l['Currency Code'], l['Relationship']])\n fp.writelines(dataset.csv)", "def file(self):\n result = []\n completePath = CompletePath(self.path, self.filename) \n with open(completePath.path(), 'w', newline='') as csvfile:\n fieldnames = ['Activity', 'Points']\n writer = csv.DictWriter(csvfile, fieldnames = fieldnames)\n writer.writeheader()\n for i in range ( len( self.groupPriority.rows() ) ):\n tmp = self.groupPriority.rows()[i]\n self.log.info ( \"FinalCSV\", \"file\",\"data {0},{1}\".format( tmp.activity(), tmp.points() ) )\n writer.writerow({'Activity': tmp.activity(), 'Points': tmp.points()})\n self.log.info(\"FinalCSV\", \"file\", \"Elaborated file: {0}\".format ( completePath.path() ) )", "def write(self, data, filename):\n id_ = 1\n weightlist_el = Element('weight-list')\n for dataset in data:\n weight_el = SubElement(weightlist_el, 'weight')\n id_el = SubElement(weight_el, 'id')\n id_el.text = str(id_)\n date_el = SubElement(weight_el, 'date')\n date_el.text = str(dataset.date) + 'T12:00:00'\n value_el = SubElement(weight_el, 'value')\n value_el.text = str(dataset.weight)\n comment_el = SubElement(weight_el, 'comment')\n comment_el.text = dataset.note\n id_ += 1\n st_tree = ElementTree(weightlist_el)\n st_tree.write(filename, encoding='UTF-8')", "def writeFloatListToFile(ldata, prec, filePath):\n\twith open(filePath, \"w\") as fh:\n\t\tfor d in ldata:\n\t\t\tfh.write(formatFloat(prec, d) + \"\\n\")", "def save_to_file(cls, list_objs):\n with open(cls.__name__ + \".json\", 'w') as my_file:\n if list_objs is None:\n json.dump([], my_file)\n\n else:\n list_of_dict = []\n for dictionary in list_objs:\n list_of_dict.append(dictionary.to_dictionary())\n j_list_objs = Base.to_json_string(list_of_dict)\n my_file.write(j_list_objs)\n return (my_file)", "def write_edgelist(H, path, delimiter=\" \", encoding=\"utf-8\"):\n with open(path, \"wb\") as file:\n for line in generate_edgelist(H, delimiter):\n line += \"\\n\"\n file.write(line.encode(encoding))", "def write_nested_string_list_to_file(string_list, filename):\n with open(filename, 'w') as f:\n for i in range(0,len(string_list)):\n for element in string_list[i]:\n f.write(element+'\\t'+str(i)+'\\n')", "def write(cls, vas):\n with open(Y, 'w') as f_i:\n for items in vas:\n f_i.write('%s ' % items)\n print(\"File written successfully. 
Check out \\\"output.txt\\\" file\")\n f_i.close()", "def writeCSV(list, filename):\n with open(filename, \"w\") as file:\n for row in list:\n for i in range(len(row)):\n file.write(str(row[i]))\n if i != len(row) - 1:\n file.write(\",\")\n else:\n file.write(\"\\n\")\n return", "def lst_to_file(comment_lst, fullpathname):\n with open(fullpathname, 'w') as writer:\n for comment in comment_lst:\n clean_raw_comment = repr(comment).lstrip('\"\\'').rstrip('\"\\'')\n writer.write(clean_raw_comment + '\\n')" ]
[ "0.69543386", "0.6891831", "0.6824986", "0.6617422", "0.65862507", "0.65442276", "0.65424216", "0.64962", "0.64938307", "0.6466383", "0.6341536", "0.6341411", "0.6335272", "0.63172203", "0.628588", "0.6272017", "0.6266411", "0.6204448", "0.6160987", "0.61450326", "0.6117469", "0.6110277", "0.61017686", "0.6101525", "0.609988", "0.60822314", "0.6074963", "0.6071479", "0.60525036", "0.6051087", "0.6031934", "0.60164565", "0.5999683", "0.59944934", "0.5987078", "0.5980381", "0.5965327", "0.59596276", "0.5950334", "0.5940511", "0.59150505", "0.5912196", "0.5908148", "0.5903712", "0.58804965", "0.58654535", "0.586499", "0.58575183", "0.585703", "0.5852855", "0.5839675", "0.5832289", "0.58313847", "0.5831117", "0.5824691", "0.5821674", "0.5809094", "0.5801798", "0.580073", "0.5793504", "0.57917386", "0.5790252", "0.57891583", "0.5787103", "0.5784838", "0.5784724", "0.5776889", "0.5772881", "0.5772115", "0.5766419", "0.5761383", "0.5754195", "0.57518905", "0.57485926", "0.5744674", "0.5741912", "0.5736833", "0.5734924", "0.5732812", "0.5732784", "0.5722107", "0.5716823", "0.571642", "0.57121354", "0.57076156", "0.57028186", "0.5702153", "0.56959975", "0.5693104", "0.5688486", "0.5668671", "0.5668204", "0.5666919", "0.56647944", "0.56631196", "0.56620204", "0.5661286", "0.5658828", "0.56396466", "0.5636191" ]
0.582233
55
Export flat list to file using csv
def csv_save_list(list_data, path, lineterminator='\n', encoding=None): with open(path, 'w') as f: writer = csv.writer(f, lineterminator=lineterminator) for item in list_data: if encoding is not None: writer.writerow([item.encode(encoding)]) else: writer.writerow([item])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def csv_file(data,output_dir,filename,order = [],head = True):\n with open(output_dir + filename + '.csv', 'w') as f:\n write = csv.writer(f)\n write.writerows(manip.dic_to_list(data,order,head),)\n return None", "def write_csv(list_file, path):\n\n\twith open(path, 'w') as f:\n\t\twriter = csv.writer(f, delimiter=',')\n\t\tfor i in list_file:\n\t\t\twriter.writerow(i)", "def persist_list_to_csv(liste, nom_fichier):\n with open(nom_fichier, 'w') as f:\n for elem in liste :\n f.write(\"{}\\n\".format(elem))", "def export(tako_list, filename):\n for tak in tako_list:\n tak = tak[0]\n l1 = [tak.ident, \"a\"]\n for gen in tak.genome.weightchr_a:\n l1.append(gen.ident)\n l1.append(gen.weight)\n l1.append(gen.mut_rate)\n l1.append(gen.dom)\n f = os.path.join(\"Data\", (filename[:-4] + \" gene data.csv\"))\n with open(f, 'a', newline=\"\") as csvfile:\n writ = csv.writer(csvfile)\n writ.writerow(l1)\n if len(tak.genome.weightchr_b) != 0:\n l2 = [tak.ident, \"b\"]\n for gen in tak.genome.weightchr_b:\n l2.append(gen.ident)\n l2.append(gen.weight)\n l2.append(gen.mut_rate)\n l2.append(gen.dom) \n writ.writerow(l2)", "def writeCSV(path,aList):\n\twith open(path,'wb') as w:\n\t\ta = csv.writer(w, delimiter = ',')\n\t\ta.writerows(aList)\n\tw.close()", "def writeCSV(list, filename):\n with open(filename, \"w\") as file:\n for row in list:\n for i in range(len(row)):\n file.write(str(row[i]))\n if i != len(row) - 1:\n file.write(\",\")\n else:\n file.write(\"\\n\")\n return", "def _csv_export(self, exppath):\n with open(exppath, 'w') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',', skipinitialspace=True)\n csvwriter.writerow(['hexstr','dmc','name'])\n for clr in self.lookup_table:\n csvwriter.writerow([clr.hex.to_str(), clr.id, clr.name])", "def generate_csv(self, lista):\r\n\t\ts = ''\r\n\t\tsalida = self.get_rel_path() + \"/\" + \"tree_names.csv\"\r\n\t\tfor i in lista:\r\n\t\t\t#st = i[2].split('/')\r\n\t\t\t#newpath = os.path.join(i[1],st)\r\n\t\t\thash = str(i[0])\r\n\t\t\tname_path = str(i[1] + \"/\" + i[2])\r\n\t\t\t#s = s + str(i[0]) + \";\" + i[1] + \"/\" + i[2] + \"\\n\"\r\n\t\t\tself.copy_file(hash,name_path)\r\n\t\t\ts = s + str(hash + \";\" + name_path + \"\\n\")\r\n\r\n\t\tf = open(salida,\"w\")\r\n\t\tf.write(s)\r\n\t\treturn salida", "def List_to_CSV(OutFname, DataList):\n with open(OutFname, 'w') as myfile:\n wr = csv.writer(myfile, delimiter=',')\n wr.writerows(line for line in DataList)", "def generate_csv(lists, output_file):\n if os.path.isfile(output_file):\n with open(output_file, 'a') as file:\n dataset = tablib.Dataset()\n for l in lists:\n dataset.append([l['Original ASIN'], l['Associated ASIN'], l['Title'], l['Price'], l['Currency Code'], l['Relationship']])\n file.write(dataset.csv)\n else:\n with open(output_file, 'w+') as fp:\n dataset = tablib.Dataset(headers=['Original ASIN', 'Associated ASIN', 'Title', 'Price', 'Currency Code', 'Relationship'])\n for l in lists:\n dataset.append([l['Original ASIN'], l['Associated ASIN'], l['Title'], l['Price'], l['Currency Code'], l['Relationship']])\n fp.writelines(dataset.csv)", "def gp_file(data,filename,output_dir='',order = [],head = False):\n f = open(output_dir + filename + '.csv', 'w')\n f.write(str(len(order)-1) + '\\n')\n write = csv.writer(f)\n write.writerows(manip.dic_to_list(data,order,head),)\n f.closed\n\n return None", "def save_csv(filename, save_list):\n with open(filename, mode='w') as csv:\n csv.writelines([','.join(item) + '\\n' for item in save_list])", "def outputFunc(filename, resultList):\n 
#assert len(parks) == 3\n \n f = open(filename, 'wt')\n \n try:\n writer = csv.writer(f)\n for i in range(len(resultList)):\n print resultList[0]\n writer.writerow(resultList[i])\n \n finally:\n f.close()", "def trans_list_to_csv(data_list):\n pseudo_buffer = Echo()\n writer = csv.writer(pseudo_buffer)\n first_line = sorted(data_list[0].keys())\n result = \"\\ufeff\" + \"\".join([writer.writerow(first_line)] +\n [writer.writerow([row[k] for k in first_line]) for row in data_list])\n return result", "def file(self):\n result = []\n completePath = CompletePath(self.path, self.filename) \n with open(completePath.path(), 'w', newline='') as csvfile:\n fieldnames = ['Activity', 'Points']\n writer = csv.DictWriter(csvfile, fieldnames = fieldnames)\n writer.writeheader()\n for i in range ( len( self.groupPriority.rows() ) ):\n tmp = self.groupPriority.rows()[i]\n self.log.info ( \"FinalCSV\", \"file\",\"data {0},{1}\".format( tmp.activity(), tmp.points() ) )\n writer.writerow({'Activity': tmp.activity(), 'Points': tmp.points()})\n self.log.info(\"FinalCSV\", \"file\", \"Elaborated file: {0}\".format ( completePath.path() ) )", "def write_csv(csv_list, out_csv_path):\n with open(out_csv_path, 'w', newline='') as csv_file:\n csv_writer = csv.writer(csv_file)\n for row in csv_list:\n csv_writer.writerow(row)", "def csvwrite(inlist, stringify=False):\n out_list = []\n for entry in inlist:\n if stringify:\n new_entry = []\n for val in entry:\n if not isinstance(val, basestring):\n val = str(val)\n new_entry.append(val)\n entry = new_entry\n this_line = ', '.join([elem_quote(val) for val in entry])\n out_list.append(this_line)\n return out_list", "def write_csv(self, filelike):\r\n items = self.rows()\r\n writer = unicodecsv.writer(filelike, encoding=\"utf-8\")\r\n writer.writerow(self.header())\r\n for item in items:\r\n writer.writerow(item)", "def write_csv(fname, olist):\n ofile = open(fname, \"wb\")\n writer = csv.writer(ofile, delimiter=',', quotechar='\"',\n quoting=csv.QUOTE_ALL)\n writer.writerows(olist)", "def export_data(fp, app_name):\n from otree.views.admin import get_display_table_rows\n colnames, rows = get_display_table_rows(\n app_name, for_export=True, subsession_pk=None)\n colnames = ['{}.{}'.format(k, v) for k, v in colnames]\n writer = csv.writer(fp)\n writer.writerows([colnames])\n writer.writerows(rows)", "def write_csv(row_list,out_name,*header_strings : str):\n with open(out_name,'w',newline='') as result_file:\n wr = csv.writer(result_file, delimiter='\\t')\n if header_strings:\n wr.writerow([name for name in header_strings])\n if type(row_list[0]) is list:\n wr.writerows(row_list)\n else:\n for row in row_list:\n wr.writerow([row])", "def save_to_csv(list_return, name, fieldnames):\n os.makedirs(os.path.dirname(name + '.csv'), exist_ok=True)\n with open(name + '.csv', 'w') as csvfile:\n csvfile.write(','.join(map(str, field_names)))\n csvfile.write('\\n')\n write = csv.writer(csvfile, delimiter=',')\n for x in range(0, len(list_return)):\n write.writerow(list_return[x])", "def generate_csv(type, json_list, columns_list):\n with open(\"data/\" + type + \"_\" + time.strftime(\"%Y-%m-%d_%H:%M:%S\") +\n \".csv\", 'a+') as f:\n csv_file = csv.DictWriter(f, fieldnames=columns_list,\n extrasaction=\"ignore\")\n csv_file.writeheader()\n for item in json_list:\n csv_file.writerow(item)\n print(\"\\nCSV file saved as data/\" + type + \"_\" +\n time.strftime(\"%Y-%m-%d_%H:%M:%S\") + \".csv\")", "def save_to_file_csv(cls, list_objs):\n l = []\n if list_objs is not None:\n for item in 
list_objs:\n l.append(item.to_dictionary())\n with open(\"%s.csv\" % cls.__name__, mode='w') as f:\n f.write(Base.to_json_string(l))", "def csv_output(self):\r\n fh = open(\"output.csv\",'w')\r\n for i in range(len(self.population.columns)):\r\n if i != len(self.population.columns)-1:\r\n fh.write(str(self.population.columns[i]))\r\n fh.write(\",\")\r\n else:\r\n fh.write(str(self.population.columns[i]))\r\n fh.write(\"\\n\")\r\n\r\n for i in range(len(self.population.data)):\r\n for j in range(len(self.population.data[i])):\r\n if j != len(self.population.data[i])-1:\r\n fh.write(str(self.population.data[i][j]))\r\n fh.write(\",\")\r\n else:\r\n fh.write(str(self.population.data[i][j]))\r\n fh.write(\"\\n\")\r\n fh.close()", "def csv_file_creator(path, list_of_jobs):\n with open(path, \"wb\") as out_file:\n writer = UnicodeWriter(out_file, delimiter=',')\n for row in list_of_jobs:\n writer.writerow(row)", "def writetoCSV(self, fileName):\n\n with open(fileName, 'w') as writeFile:\n writeFile.write(\"ID,Fx,Fy,Fz\\n\")\n for fstnr in F:\n writeFile.write(str(fstnr.ID))\n for i in fstnr.force:\n writeFile.write(',' + str(i))\n writeFile.write('\\n')", "def list_to_csv(list, output_file, header):\n with open(output_file, 'w', ) as csvFile:\n writer = csv.writer(csvFile)\n writer.writerow(header)\n writer.writerows(list)\n\n csvFile.close()", "def writeItemsToCSV(self, fileName, itemList):\n\t\twith open(fileName, 'w') as csvFile:\n\t\t csvWriter = csv.writer(csvFile, delimiter=',')\n\t\t # Column titles\n\t\t csvWriter.writerow([\"Brandname\",\"Productname\",\"Colors\",\"Sizes\",\"Description\",\"Materials\",\"Maintenance\",\"RegularPrice\",\"CurrentPrice\"])\n\t\t for item in itemList:\n\t\t csvWriter.writerow(list(item))", "def generate_csv(self, output_file):\n try: # We are going to \"try\" something\n csv_file = open(output_file, 'w+') # open \"output_file\" as a writable file and return a handle called \"csv_file\"\n except OSError as err: # If something goes wrong with the open, we catch the exception\n fatal(\"{0}\".format(err), -1) # exit with something other than 0 so the shell knows something went wrong\n \n writer = csv.writer(csv_file) # create a CSV writing object that's pointing at our open file handle\n\n writer.writerow([\"Question\",\"Answers\"]) # Let's write the top row\n for k in self.questions.keys(): # Let's walk down the directory by key\n # write the \"key\" (which is the question) and then let's take the list of answers and create a comma delmited list.\n # this is likely totally wrong since you could have an answer in it that also has a comma...\n writer.writerow([k, \",\".join(self.questions[k].answers)]) # insert a key (which is the question) and then let's take the array of \n\n csv_file.close() # close the csv_file file handle", "def user_list_csv():\n us = user.User.query.all()\n filename = 'xxx.csv'\n csv_name = _rename_file(filename)\n url = app.config['CSV_FILES_DEST'] + '/' + csv_name\n with codecs.open(url, 'wb') as csvfile:\n #fieldnames = ['账号', '姓名', '描述', '角色', '邮箱', '电话', '工作电话', '公司', '部门', '职位']\n fieldnames = []\n if len(us) > 0:\n fieldnames = us[0].to_csv_dict().keys()\n writer = unicodecsv.writer(csvfile, encoding='utf-8-sig')\n writer.writerow(fieldnames)\n for u in us:\n dct = u.to_csv_dict()\n n_items = {}\n for name in fieldnames:\n if dct[name] is not None:\n n_items[name] = dct[name]\n else:\n n_items[name] = ''\n writer.writerow(n_items.values())\n return send_file(url)", "def create_csv(self):\n try:\n # Convert List of Lists to 
DataFrame and write it to a CSV\n pd.DataFrame(self.data, columns=self.header) \\\n .to_csv(os.path.join(self.file_path, self.file_name), index=False)\n self.successful_run = True\n except:\n # TODO create Exception Handling\n raise", "def to_csv(self, csvwriter):\n csvwriter.writerow(self.to_csv_row())", "def makeCSV(self,file_name, data, topList):\n file_name = file_name+\".csv\"\n w = csv.writer(open(file_name, \"w\"))\n w.writerow(topList)\n for key, val in data.items():\n row = list(val)\n row.insert(0,key)\n w.writerow(row)", "def save_list_of_list(data, path, lineterminator='\\n', encoding=None):\n with open(path, 'w') as f:\n writer = csv.writer(f, lineterminator=lineterminator)\n if encoding is not None:\n data = [[item.encoding(encoding) for item in items]\n for items in data]\n writer.writerows(data)", "def save_csv(outputfile):\n with open(outputfile, 'w', newline='') as outfile:\n writer = csv.writer(outfile)\n writer.writerow(DATA_KEYS)\n\n # Add data to csv-file\n for data in data_list:\n writer.writerow(data)", "def DumpCsv(data):\n \n raise Exception('TBI: Need standard container structure for this to work, cause its flat...')", "def ExportAsCSV(csv_out_path, data):\n with open(csv_out_path, \"w\", newline=\"\") as f:\n writer = csv.writer(f, delimiter=',')\n writer.writerows(data)\n f.close()\n return", "def convert2csv(contacts, output_path):\n\n print(\"[!] not implemented yet\")", "def generate_csv(table, header):\n with open(\"%s.csv\" % header, \"w\") as csvfile:\n for i in range(len(table)):\n for j in range(len(table[i])):\n if j != len(table[i])-1:\n tmp = table[i][j] + \",\"\n else:\n tmp = table[i][j] + \"\\n\"\n csvfile.write(tmp)", "def output_to_file(utilist, filepath=\"demo.csv\"):\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n with open(filepath, \"a\") as f:\n f.write(utilist + \"\\n\")", "def writeCSV():\n final_list = get_final_list()\n path_to_csv_File = 'system_metrics.csv'\n\n csv_file = open(path_to_csv_File, 'w+', newline='', encoding=\"utf8\")\n csv_file_writer = csv.writer(csv_file, delimiter=',')\n\n csv_file_writer.writerow(['Subscription', 'Resource', 'MetricType',\n 'Timestamp', 'Unit', 'Minimum', 'Maximum', 'Average'])\n\n for item in final_list:\n csv_file_writer.writerow([item['subscription'], item['resource'], item['metricType'], item['timestamp'],\n item['unit'], item['minimum'], item['maximum'], item['average']])\n\n print('Output written successfully!!')", "def to_csv_file_obj(self, rows):\n output = StringIO.StringIO()\n writer = csv.writer(output)\n writer.writerows(rows)\n return output", "def write_list(outputfilename, list):\r\n try:\r\n with open(outputfilename, 'w', newline='', encoding='utf-8') as outfile:\r\n itemwriter = csv.writer(outfile, delimiter=\",\")\r\n for item in list:\r\n itemwriter.writerow(item)\r\n except:\r\n input(\"File still open! 
Please close and press enter to continue\")\r\n with open(outputfilename, 'w', newline='', encoding='utf-8') as outfile:\r\n itemwriter = csv.writer(outfile, delimiter=\",\")\r\n for item in list:\r\n itemwriter.writerow(item)", "def csv(self, destination_path):\n # todo - test for single and duplicate base cases\n to_csv(self._axl_data, destination_path)", "def export_users(_request):\n query = models.UserProfile.all().order('email')\n rows = []\n for user in query:\n is_superuser = 0\n if user.is_superuser:\n is_superuser = 1\n rows.append('%s,%s\\n' % (user.email, is_superuser))\n\n response = http.HttpResponse(''.join(rows), mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=users.csv'\n return response", "def write_to_csv(list_of_emails):\n import csv\n # use newline='' to prevent double-spaced rows\n with open('emails.csv', 'w', newline='') as outFile:\n outWriter = csv.writer(outFile)\n charNum = outWriter.writerow(['email'])\n for i in list_of_emails:\n charNum = outWriter.writerow([i])\n outFile.close()", "def write_to_csv(list_of_rows, file_name):\n with open(file_name, 'w') as f:\n writer = csv.writer(f)\n for row in list_of_rows:\n if None in row:\n continue\n writer.writerow(row)\n \n f.close()", "def exportList(self, list_id):\n params = {'LIST_ID' : list_id,\n 'EXPORT_TYPE' : 'ALL',\n 'EXPORT_FORMAT': 'CSV',\n 'FILE_ENCODING': 'utf-8'}\n xrequest = xml_str(self.buildRequestEtree('ExportList', params))\n xresults = self.request(xrequest)\n xpath = '/Envelope/Body/RESULT/FILE_PATH'\n return xresults.xpath(xpath)[0].text", "def save_csv(self, filename): # DONE\n self.data.to_csv(filename)", "def generate_csv(inf, outf):\n o = csv.writer(outf)\n o.writerow(COLUMNS)\n for row in reformat_data(inf):\n o.writerow([inf.name] + row)", "def write_to_file(self, results):\n with open(self.outputFilename, \"w\") as csvFile:\n csvWriter = csv.writer(csvFile, delimiter=',') \n title_row = ('asset_id', 'component_id', 'latitude', 'longitude', 'installation_date', 'commissioning_date', 'street_name', 'cabinet_id', 'nominal_wattage', 'current_time', 'current_LogValue', 'current_IsLogValueOff') \n csvWriter.writerow(title_row)\n for record in results:\n csvWriter.writerow(record)", "def to_csv(header, rows):\r\n with open('result.csv', 'w') as result:\r\n result_writer = csv.writer(result, delimiter=';')\r\n result_writer.writerow(header)\r\n result_writer.writerows(rows)", "def csv_writer(data, path):\n\twith open(path, \"wb\") as csv_file:\n\t\twriter= csv.writer(csv_file, delimiter=',')\n\t\twriter.writerows(data)", "def _write_csv(self):\n\n # add the label to the header\n if self.input_data.get_value(InputType.TIME_PERIOD) == 'all':\n self.header.append('Date')\n else:\n self.header.append('sample id')\n\n key_list = []\n\n for i, cube in enumerate(self.cube_list):\n if self.input_data.get_value(InputType.TIME_PERIOD) == 'all':\n self._write_sample_with_date(cube, i, key_list)\n else:\n self._write_sample(cube, i, key_list)\n\n output_data_file_path = self._get_full_file_name()\n self._write_data_dict(output_data_file_path, key_list)\n\n return [output_data_file_path]", "def export_player_list_csv():\n # get players belonging to a team\n players = Player.objects.select_related(\n 'team', 'team__manager'\n ).order_by(\n '-value', 'code'\n )\n\n with open('./data/player_list_auction.csv', 'w') as f:\n writer = csv.writer(f)\n\n for position in Player.POSITION:\n\n writer.writerow([\n 'Code',\t'Name',\t'Team',\t'Value', 'Pts', 'Manager', 'Manager Nominations'\n 
])\n\n for p in players.filter(position=position[0]):\n writer.writerow([\n p.code,\n p.name,\n p.prem_team.code,\n p.value,\n p.last_years_total,\n p.team.manager.username if p.team else '',\n p.auction_nomination_managers\n ])", "def _export_csv(x, y, export_to):\r\n\r\n with open(export_to, 'w', newline='') as e:\r\n writer = csv.writer(e, delimiter=',')\r\n for i in range (0, len(x)):\r\n writer.writerow([x[i], y[i]])", "def save_items_to_csv(items_data: pd.DataFrame):\n with open('etsy_items.csv', 'w') as f:\n writer = csv.writer(f)\n writer.writerows(items_data)", "def export_csv(self):\n outputfile = tkinter.filedialog.asksaveasfilename(\n defaultextension=\".csv\",\n filetypes=((\"comma seperated values\", \"*.csv\"),\n (\"All Files\", \"*.*\")))\n if outputfile:\n tabledata = self.tabs.window.aistracker.create_table_data()\n export.write_csv_file(tabledata, outputfile)\n else:\n raise ExportAborted('Export cancelled by user.')", "def write_table_to_csv(table: List[List], filename: str):\n with open(filename, 'w') as csvfile:\n writer = csv.writer(csvfile, delimiter='\\t')\n for row in table:\n writer.writerow(row)", "def export_csv(self, csvfileobject):\n for index, track in enumerate(self._tracks):\n csvfileobject.writerow(track.properties)\n for delta in track.periods: \n csvfileobject.writerow(delta.properties)", "def labels2csv(labels, csv_path):\n with open(csv_path, \"w\") as file:\n file.write(\"id,label\\n\")\n for i, label in enumerate(labels):\n file.write(\"{},{}\\n\".format(i, label))", "def csvdata(nodelist):\n\n data = \"\"\n for subnode in nodelist:\n if (subnode.nodeType == subnode.ELEMENT_NODE):\n try:\n data = data + \",\" + subnode.childNodes[0].data\n except:\n data = data+ \",\"\n return data[1:] + \"\\n\"", "def export_csv(state, out_file=None):\n\n if out_file is None:\n csvfile = sys.stdout\n else:\n csvfile = open(out_file, 'w')\n\n try:\n writer = csv.writer(csvfile)\n for grade in state.grades:\n writer.writerow([grade.student_name(), grade.score(),\n grade.breakdown(state.user_name)])\n finally:\n if out_file is not None:\n csvfile.close()", "def export_csv(user, tasks):\n employee_name = user[0]['name']\n employee_id = user[0]['id']\n csvfile = '{}.csv'.format(employee_id)\n with open(csvfile, mode='w') as file:\n towrite = csv.writer(file, delimiter=',', quoting=csv.QUOTE_ALL)\n for task in tasks:\n towrite.writerow([employee_id, employee_name,\n task['completed'], task['title']])", "def writeCSV(filename, separator, data):\n \n filetowrite = open(filename, \"w\")\n values = []\n i = 0 #Count the number of objects already written\n for item in data:\n filetowrite.write(item)\n i += 1\n if i < len(data.keys()):\n filetowrite.write(separator)\n values.append(data[item])\n filetowrite.write(\"\\n\")\n i = 0\n for value in values:\n filetowrite.write(str(value))\n i += 1\n if i < len(values):\n filetowrite.write(separator)\n \n filetowrite.close()", "def write_csv(self, stock_list):\n\n with open(self.outfile, 'w') as outfile:\n writer = csv.writer(outfile, delimiter=',',\n quoting=csv.QUOTE_MINIMAL)\n for symbol, values in stock_list.items():\n # Need to find a better way to handle this...\n writer.writerow([values['symbol'], values['name']])", "def export_corpus_csv(corpus,path, delimiter = ',', trans_delimiter = '.'):\n word = corpus.random_word()\n header = sorted(word.descriptors)\n with open(path, encoding='utf-8', mode='w') as f:\n print(delimiter.join(header), file=f)\n for key in corpus.iter_sort():\n print(delimiter.join(make_safe(getattr(key, 
value),trans_delimiter) for value in header), file=f)", "def write_csv_file (metadata_list, csv_file, append) :\n try :\n with open (csv_file, 'a' if append else 'w' , newline='') as file :\n writer = csv.DictWriter(file, fieldnames=MetadataEntity.get_fieldnames())\n if not append: writer.writeheader()\n for e in metadata_list :\n writer.writerow(e.get_values())\n file.close()\n except :\n print ('ERROR: writing csv file: ' + csv_file)\n return False\n return True", "def each_to_csv(data, key, value):\n data.to_csv(\"camelot/clean/nrld_{}_{}.csv\".format(key, value), index=False)\n return data", "def write_csv(self, outfile, collapse_orders=False, show_age=False):\r\n # Write header row\r\n outfile.write(self.get_csv_header(collapse_orders, show_age).encode())\r\n\r\n # Write content\r\n for x in self.records:\r\n x.write_csv(outfile, collapse_orders, show_age)", "def _export_users(admin_access_token):\n admin = User.query.filter_by(id_=ADMIN_USER_ID).one_or_none()\n if admin_access_token != admin.access_token:\n raise ValueError(\"Admin access token invalid.\")\n csv_file_obj = io.StringIO()\n csv_writer = csv.writer(csv_file_obj, dialect=\"unix\")\n for user in User.query.all():\n csv_writer.writerow(\n [user.id_, user.email, user.access_token, user.username, user.full_name]\n )\n return csv_file_obj", "def CSVWriter (iterable, outLoc, header=\"\", ):\n if not iterable:\n print (\"nothing to write\")\n return 0\n\n out = open(outLoc, 'w')\n\n if header:\n out.write(header+'\\n')\n\n #Only works if iterable is a nested list\n for member in iterable:\n for item in member:\n out.write(str(item)+',')\n out.write('\\n')\n\n print(\"write to \"+outLoc+\" successful.\")\n return 1", "def dump_csv(f, rra, out):\n w = writer(out)\n for row in dump(f, rra):\n w.writerow([s.strip() for s in row])", "def csv_writelist(file, oldfile, chlst, num):\n import csv\n writelist = checkdifferences(oldfile, chlst, num)\n print('before', writelist)\n with open('{}.csv'.format(file), 'w', newline='') as csvwrite:\n writer = csv.writer(csvwrite, delimiter=';')\n try:\n for eachrow in writelist:\n writer.writerow(eachrow)\n except:\n if TypeError:\n print('Typeerror')\n csvwrite.close()", "def export_to_csv(self, file_name):\n \n with open(file_name, 'w', newline='') as csvDataFile:\n csvWriter = csv.writer(csvDataFile, delimiter = ',')\n\n for i in range(0,self.sample_num):\n data = list()\n data.append(self.sample[i].simulation_name)\n data.append(self.sample[i].result_name)\n data.extend(self.sample[i].parameters.tolist())\n data.extend(self.sample[i].result) \n csvWriter.writerow(data)", "def add_to_csv(file_name, single_list):\n final_list = read_csv(file_name)\n writer = csv.writer(open(file_name, 'wb'), delimiter=',',quoting=csv.QUOTE_MINIMAL)\n final_list.append(single_list)\n for x in final_list:\n writer.writerow(x)", "def save_to_file_csv(cls, list_objs):\n f_name = cls.__name__ + \".csv\"\n with open(f_name, 'w', newline='') as f:\n if list_objs is None or list_objs == []:\n f.write(\"[]\")\n\n else:\n if cls.__name__ == 'Rectangle':\n h = ['id', 'width', 'height', 'x', 'y']\n else:\n h = ['id', 'size', 'x', 'y']\n ncsv = csv.DictWriter(f, fieldnames=h)\n for obj in list_objs:\n ncsv.writerow(obj.to_dictionary())", "def data_to_csv(json_list, filename='out.csv'):\n with open('out.csv', 'w') as csvfile:\n header = get_header(json_list)\n cw = csv.writer(csvfile)\n cw.writerow(header)\n for dict in json_list:\n row = dict_to_list(dict, header)\n cw.writerow(row)", "def write(l, path, columns):\n \n file 
= open(path, 'w', newline = '', encoding = 'utf-8')\n writer = csv.writer(file, delimiter = '\\t', quotechar = '', quoting = csv.QUOTE_NONE)\n row = []\n for col in columns:\n row.append(col)\n writer.writerow(row)\n for entry in l:\n row = []\n for col in columns:\n row.append(entry[col])\n writer.writerow(row)\n file.close()", "def construct_csv(cursor):\n header, data = construct_list(cursor)\n # python 2 and 3 handle writing files differently\n if sys.version_info[0] <= 2:\n output = io.BytesIO()\n else:\n output = io.StringIO()\n writer = csv.writer(output)\n\n writer.writerow(header)\n for row in data:\n writer.writerow(row)\n\n return output.getvalue()", "def save_to_file_csv(cls, list_objs):\n ld = []\n with open(cls.__name__ + \".csv\", \"w\", encoding=\"utf-8\") as f:\n if list_objs:\n for obj in list_objs:\n if cls.__name__ == 'Rectangle':\n ld.append([\n obj.id, obj.width, obj.height, obj.x, obj.y])\n if cls.__name__ == 'Square':\n ld.append([obj.id, obj.size, obj.x, obj.y])\n writer = csv.writer(f)\n for row in ld:\n writer.writerow(row)", "def csv(file):\n\n def atoms(lst):\n return map(atom, lst)\n\n def atom(x):\n try:\n return int(x)\n except:\n try:\n return float(x)\n except ValueError:\n return x\n\n for row in rows(file, prep=atoms):\n yield row", "def save_to_file_csv(cls, list_objs):\n with open(cls.__name__ + \".csv\", \"w\", newline='') as f:\n if cls.__name__ == \"Rectangle\":\n fieldnames = ['id', 'width', 'height', 'x', 'y']\n elif cls.__name__ == \"Square\":\n fieldnames = ['id', 'size', 'x', 'y']\n writer = csv.DictWriter(f, fieldnames=fieldnames)\n writer.writeheader()\n if list_objs is not None:\n for model in list_objs:\n writer.writerow(model.to_dictionary())", "def save_entries(self):\n with open(self.file_name, \"w\") as file:\n file.write('date,name,minutes,note\\n')\n for entry in self.entries:\n writer = csv.writer(file)\n writer.writerow([entry.date, entry.name, entry.minutes, entry.note])", "def column_output(self, output_fname, list_2d):\n\t\toutf = open(output_fname, 'w')\n\t\twriter = csv.writer(outf, delimiter='\\t')\n\t\tfor list in list_2d:\n\t\t\twriter.writerow(list)\n\t\tdel writer\n\t\toutf.close()", "def export_csv(self, path):\r\n\r\n with open(path, 'w') as f:\r\n f.write('# h,hr,m')\r\n\r\n if self.rho is not None:\r\n f.write(',rho')\r\n if self.temperature is not None:\r\n f.write(',temperature')\r\n\r\n f.write('\\n')\r\n for i in range(self.shape[0]):\r\n for j in range(self.shape[1]):\r\n f.write(f'{self.h[i, j]},{self.hr[i, j]},{self.m[i, j]}')\r\n if self.rho is not None:\r\n f.write(f',{self.rho[i, j]}')\r\n if self.temperature is not None:\r\n f.write(f',{self.temperature[i, j]}')\r\n f.write('\\n')\r\n return", "def save_csv(outfile, cities):\n writer = csv.writer(outfile)\n writer.writerow(['Name'])\n for row in cities:\n writer.writerow([row])", "def createFileCSV(table, path=\"./prediction\"):\t\n\tif len(table) < 1:\n\t\traise NameError('Empty Table!')\n\telse:\n\t\tfile = open(path + '.csv', 'w+')\n\n\t\tfile.write(table[0].toStringHeaders() + \"\\n\")\n\n\t\tfor row in table:\n\t\t\tfile.write(row.toStringCSV() + '\\n')\n\t\tfile.close()", "def export_fallout(): \n with open('fallout.csv', 'w', newline='') as csvfile:\n wr = csv.writer(csvfile, delimiter=',')\n wr.writerows(environment)", "def output(items, headers, outputFile):\n\tdictToValues = lambda d: \\\n\t\tmap(lambda h: d.get(h, ''), headers)\n\n\treturn writeCsv(outputFile, map(dictToValues, items))", "def write_csv(self, key_list, word_list):\n # Write out 
data\n out_data = []\n # Match filtered indexes to words\n for i in key_list.index:\n subset = word_list[word_list['key'] == i]\n # Add to aggregate list\n out_data.append(subset['word'].tolist())\n # Dump list to headerless CSV\n with open(self.output, 'w') as f:\n writer = csv.writer(f)\n writer.writerows(out_data)\n return len(out_data)", "def write_output_file(ad_models):\n\n with open('output-data-utf8.csv', 'w', newline='', encoding='UTF-8') as output_file:\n csv_writer = csv.writer(output_file, delimiter=',')\n for ad in ad_models:\n csv_writer.writerow((ad.date.strftime('%Y/%m/%d'), ad.country_code, ad.impression, ad.clicks))", "def generate_csv_file(rows: Collection[dict[str, str]]):\n file = io.StringIO()\n csv_writer = csv.DictWriter(file, (\"label\", \"inventory\", \"collection\"))\n csv_writer.writeheader()\n csv_writer.writerows(rows)\n file.seek(0)\n return io.BytesIO(bytes(file.read(), encoding=\"utf-8\"))", "def dump_list2csv_hdfs(sc, content_list, file_path):\n content_str = ''\n for i in range(len(content_list)):\n i_content = ','.join([str(col) for col in content_list[i]]) + '\\n'\n content_str += i_content\n write_to_hdfs(sc, file_path, content_str, overwrite=True)", "def outputFunc(filename, parks,roading,private):\n #assert len(parks) == 3\n \n f = open(filename, 'wt')\n \n try:\n writer = csv.writer(f)\n writer.writerow(days)\n writer.writerow(parks)\n writer.writerow(roading)\n writer.writerow(private)\n finally:\n f.close()", "def csv_write (data):\n \n csv_data=data[0:]\n csv1_data = open('backup.csv', 'a')\n csvwriter = csv.writer(csv1_data)\n\n count = 0\n\n for i in csv_data:\n if count == 0:\n header = i.keys()\n csvwriter.writerow(header)\n count += 1\n csvwriter.writerow(i.values())\n\n csv1_data.close()\n\n #http://blog.appliedinformaticsinc.com/how-to-parse-and-convert-json-to-csv-using-python/", "def save_class_list():\r\n try:\r\n classStringList.clear() #clear the classString List\r\n for i in range(0,len(classes)):\r\n classStringList.append(classes[i].csvRow()) #enter classes to the classStringList from the classes\r\n f = open(\"mySchedule.csv\", 'w', newline ='')\r\n csv.writer(f).writerow([\"Day\", \"Class\", \"Start Time\", \"End Time\"])\r\n for classCSVString in classStringList:\r\n csv.writer(f).writerow(classCSVString)\r\n f.close()\r\n except Exception as e:\r\n print(\"Exception found:\" + e)", "def sites_csv():\n import io\n import csv\n\n dest = io.StringIO()\n dest.write('\\ufeff')\n writer = csv.writer(dest, quoting=csv.QUOTE_MINIMAL)\n\n with Config() as config:\n with db.Connection(config) as con:\n writer.writerow(con.fieldnames)\n writer.writerows(con.read_entries())\n\n output = flask.make_response(dest.getvalue())\n output.headers[\"Content-Disposition\"] = \"attachment; filename=spatialcitizenscience.csv\"\n output.headers[\"Content-type\"] = \"text/csv\"\n return output", "def save_to_file_csv(cls, list_objs):\n list_dictionaries = []\n if list_objs is None or list_objs == []:\n string_dictionary = \"[]\"\n else:\n for _obj_dict in list_objs:\n list_dictionaries.append(_obj_dict.to_dictionary())\n string_dictionary = Base.to_json_string(list_dictionaries)\n with open(cls.__name__ + \".csv\", \"w\") as _file:\n _file.write(string_dictionary)\n _file.close()" ]
[ "0.7381743", "0.72397584", "0.720624", "0.7086666", "0.7081523", "0.70391846", "0.6996409", "0.6985795", "0.69504803", "0.6934752", "0.691903", "0.68434834", "0.6836178", "0.6823749", "0.681523", "0.68104255", "0.6807932", "0.6802946", "0.68010736", "0.67903537", "0.677872", "0.67750055", "0.67481095", "0.67027277", "0.66949314", "0.66834784", "0.6674238", "0.66666734", "0.66596204", "0.66366637", "0.66317755", "0.66279995", "0.6622523", "0.6616483", "0.66097856", "0.659507", "0.65940523", "0.65810865", "0.65731305", "0.6564888", "0.6562528", "0.6558745", "0.6556458", "0.6545499", "0.6522615", "0.6513514", "0.6509513", "0.64816946", "0.6461101", "0.64584696", "0.6455837", "0.64450145", "0.6442721", "0.6439073", "0.64345396", "0.6434199", "0.6431927", "0.6431617", "0.64300317", "0.64296114", "0.6424041", "0.64173305", "0.64123917", "0.6411384", "0.640888", "0.6402221", "0.63966525", "0.63851947", "0.6383985", "0.63787854", "0.6378406", "0.63737994", "0.63732904", "0.6367996", "0.6353304", "0.63524", "0.63509536", "0.6342274", "0.63422674", "0.633318", "0.6324426", "0.6315968", "0.63135123", "0.63072443", "0.63071716", "0.63003653", "0.62892807", "0.6285992", "0.6282795", "0.62781775", "0.6277397", "0.6271328", "0.6250762", "0.6249998", "0.624358", "0.62368035", "0.6230521", "0.6220997", "0.620265", "0.6201822" ]
0.6799009
19
Export list of list to file using csv
def save_list_of_list(data, path, lineterminator='\n', encoding=None): with open(path, 'w') as f: writer = csv.writer(f, lineterminator=lineterminator) if encoding is not None: data = [[item.encode(encoding) for item in items] for items in data] writer.writerows(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_csv(list_file, path):\n\n\twith open(path, 'w') as f:\n\t\twriter = csv.writer(f, delimiter=',')\n\t\tfor i in list_file:\n\t\t\twriter.writerow(i)", "def persist_list_to_csv(liste, nom_fichier):\n with open(nom_fichier, 'w') as f:\n for elem in liste :\n f.write(\"{}\\n\".format(elem))", "def export(tako_list, filename):\n for tak in tako_list:\n tak = tak[0]\n l1 = [tak.ident, \"a\"]\n for gen in tak.genome.weightchr_a:\n l1.append(gen.ident)\n l1.append(gen.weight)\n l1.append(gen.mut_rate)\n l1.append(gen.dom)\n f = os.path.join(\"Data\", (filename[:-4] + \" gene data.csv\"))\n with open(f, 'a', newline=\"\") as csvfile:\n writ = csv.writer(csvfile)\n writ.writerow(l1)\n if len(tak.genome.weightchr_b) != 0:\n l2 = [tak.ident, \"b\"]\n for gen in tak.genome.weightchr_b:\n l2.append(gen.ident)\n l2.append(gen.weight)\n l2.append(gen.mut_rate)\n l2.append(gen.dom) \n writ.writerow(l2)", "def writeCSV(list, filename):\n with open(filename, \"w\") as file:\n for row in list:\n for i in range(len(row)):\n file.write(str(row[i]))\n if i != len(row) - 1:\n file.write(\",\")\n else:\n file.write(\"\\n\")\n return", "def csv_file(data,output_dir,filename,order = [],head = True):\n with open(output_dir + filename + '.csv', 'w') as f:\n write = csv.writer(f)\n write.writerows(manip.dic_to_list(data,order,head),)\n return None", "def generate_csv(lists, output_file):\n if os.path.isfile(output_file):\n with open(output_file, 'a') as file:\n dataset = tablib.Dataset()\n for l in lists:\n dataset.append([l['Original ASIN'], l['Associated ASIN'], l['Title'], l['Price'], l['Currency Code'], l['Relationship']])\n file.write(dataset.csv)\n else:\n with open(output_file, 'w+') as fp:\n dataset = tablib.Dataset(headers=['Original ASIN', 'Associated ASIN', 'Title', 'Price', 'Currency Code', 'Relationship'])\n for l in lists:\n dataset.append([l['Original ASIN'], l['Associated ASIN'], l['Title'], l['Price'], l['Currency Code'], l['Relationship']])\n fp.writelines(dataset.csv)", "def writeCSV(path,aList):\n\twith open(path,'wb') as w:\n\t\ta = csv.writer(w, delimiter = ',')\n\t\ta.writerows(aList)\n\tw.close()", "def save_csv(filename, save_list):\n with open(filename, mode='w') as csv:\n csv.writelines([','.join(item) + '\\n' for item in save_list])", "def List_to_CSV(OutFname, DataList):\n with open(OutFname, 'w') as myfile:\n wr = csv.writer(myfile, delimiter=',')\n wr.writerows(line for line in DataList)", "def outputFunc(filename, resultList):\n #assert len(parks) == 3\n \n f = open(filename, 'wt')\n \n try:\n writer = csv.writer(f)\n for i in range(len(resultList)):\n print resultList[0]\n writer.writerow(resultList[i])\n \n finally:\n f.close()", "def write_csv(csv_list, out_csv_path):\n with open(out_csv_path, 'w', newline='') as csv_file:\n csv_writer = csv.writer(csv_file)\n for row in csv_list:\n csv_writer.writerow(row)", "def write_csv(fname, olist):\n ofile = open(fname, \"wb\")\n writer = csv.writer(ofile, delimiter=',', quotechar='\"',\n quoting=csv.QUOTE_ALL)\n writer.writerows(olist)", "def save_to_file_csv(cls, list_objs):\n l = []\n if list_objs is not None:\n for item in list_objs:\n l.append(item.to_dictionary())\n with open(\"%s.csv\" % cls.__name__, mode='w') as f:\n f.write(Base.to_json_string(l))", "def generate_csv(type, json_list, columns_list):\n with open(\"data/\" + type + \"_\" + time.strftime(\"%Y-%m-%d_%H:%M:%S\") +\n \".csv\", 'a+') as f:\n csv_file = csv.DictWriter(f, fieldnames=columns_list,\n extrasaction=\"ignore\")\n csv_file.writeheader()\n for item in 
json_list:\n csv_file.writerow(item)\n print(\"\\nCSV file saved as data/\" + type + \"_\" +\n time.strftime(\"%Y-%m-%d_%H:%M:%S\") + \".csv\")", "def save_to_csv(list_return, name, fieldnames):\n os.makedirs(os.path.dirname(name + '.csv'), exist_ok=True)\n with open(name + '.csv', 'w') as csvfile:\n csvfile.write(','.join(map(str, field_names)))\n csvfile.write('\\n')\n write = csv.writer(csvfile, delimiter=',')\n for x in range(0, len(list_return)):\n write.writerow(list_return[x])", "def _csv_export(self, exppath):\n with open(exppath, 'w') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',', skipinitialspace=True)\n csvwriter.writerow(['hexstr','dmc','name'])\n for clr in self.lookup_table:\n csvwriter.writerow([clr.hex.to_str(), clr.id, clr.name])", "def list_to_csv(list, output_file, header):\n with open(output_file, 'w', ) as csvFile:\n writer = csv.writer(csvFile)\n writer.writerow(header)\n writer.writerows(list)\n\n csvFile.close()", "def csv_save_list(list_data, path, lineterminator='\\n', encoding=None):\n with open(path, 'w') as f:\n writer = csv.writer(f, lineterminator=lineterminator)\n for item in list_data:\n if encoding is not None:\n writer.writerow([item.encode(encoding)])\n else:\n writer.writerow([item])", "def csv_file_creator(path, list_of_jobs):\n with open(path, \"wb\") as out_file:\n writer = UnicodeWriter(out_file, delimiter=',')\n for row in list_of_jobs:\n writer.writerow(row)", "def gp_file(data,filename,output_dir='',order = [],head = False):\n f = open(output_dir + filename + '.csv', 'w')\n f.write(str(len(order)-1) + '\\n')\n write = csv.writer(f)\n write.writerows(manip.dic_to_list(data,order,head),)\n f.closed\n\n return None", "def write_to_csv(list_of_emails):\n import csv\n # use newline='' to prevent double-spaced rows\n with open('emails.csv', 'w', newline='') as outFile:\n outWriter = csv.writer(outFile)\n charNum = outWriter.writerow(['email'])\n for i in list_of_emails:\n charNum = outWriter.writerow([i])\n outFile.close()", "def generate_csv(self, lista):\r\n\t\ts = ''\r\n\t\tsalida = self.get_rel_path() + \"/\" + \"tree_names.csv\"\r\n\t\tfor i in lista:\r\n\t\t\t#st = i[2].split('/')\r\n\t\t\t#newpath = os.path.join(i[1],st)\r\n\t\t\thash = str(i[0])\r\n\t\t\tname_path = str(i[1] + \"/\" + i[2])\r\n\t\t\t#s = s + str(i[0]) + \";\" + i[1] + \"/\" + i[2] + \"\\n\"\r\n\t\t\tself.copy_file(hash,name_path)\r\n\t\t\ts = s + str(hash + \";\" + name_path + \"\\n\")\r\n\r\n\t\tf = open(salida,\"w\")\r\n\t\tf.write(s)\r\n\t\treturn salida", "def write_csv(row_list,out_name,*header_strings : str):\n with open(out_name,'w',newline='') as result_file:\n wr = csv.writer(result_file, delimiter='\\t')\n if header_strings:\n wr.writerow([name for name in header_strings])\n if type(row_list[0]) is list:\n wr.writerows(row_list)\n else:\n for row in row_list:\n wr.writerow([row])", "def trans_list_to_csv(data_list):\n pseudo_buffer = Echo()\n writer = csv.writer(pseudo_buffer)\n first_line = sorted(data_list[0].keys())\n result = \"\\ufeff\" + \"\".join([writer.writerow(first_line)] +\n [writer.writerow([row[k] for k in first_line]) for row in data_list])\n return result", "def writeItemsToCSV(self, fileName, itemList):\n\t\twith open(fileName, 'w') as csvFile:\n\t\t csvWriter = csv.writer(csvFile, delimiter=',')\n\t\t # Column titles\n\t\t csvWriter.writerow([\"Brandname\",\"Productname\",\"Colors\",\"Sizes\",\"Description\",\"Materials\",\"Maintenance\",\"RegularPrice\",\"CurrentPrice\"])\n\t\t for item in itemList:\n\t\t 
csvWriter.writerow(list(item))", "def csvwrite(inlist, stringify=False):\n out_list = []\n for entry in inlist:\n if stringify:\n new_entry = []\n for val in entry:\n if not isinstance(val, basestring):\n val = str(val)\n new_entry.append(val)\n entry = new_entry\n this_line = ', '.join([elem_quote(val) for val in entry])\n out_list.append(this_line)\n return out_list", "def data_to_csv(json_list, filename='out.csv'):\n with open('out.csv', 'w') as csvfile:\n header = get_header(json_list)\n cw = csv.writer(csvfile)\n cw.writerow(header)\n for dict in json_list:\n row = dict_to_list(dict, header)\n cw.writerow(row)", "def file(self):\n result = []\n completePath = CompletePath(self.path, self.filename) \n with open(completePath.path(), 'w', newline='') as csvfile:\n fieldnames = ['Activity', 'Points']\n writer = csv.DictWriter(csvfile, fieldnames = fieldnames)\n writer.writeheader()\n for i in range ( len( self.groupPriority.rows() ) ):\n tmp = self.groupPriority.rows()[i]\n self.log.info ( \"FinalCSV\", \"file\",\"data {0},{1}\".format( tmp.activity(), tmp.points() ) )\n writer.writerow({'Activity': tmp.activity(), 'Points': tmp.points()})\n self.log.info(\"FinalCSV\", \"file\", \"Elaborated file: {0}\".format ( completePath.path() ) )", "def _export_csv(x, y, export_to):\r\n\r\n with open(export_to, 'w', newline='') as e:\r\n writer = csv.writer(e, delimiter=',')\r\n for i in range (0, len(x)):\r\n writer.writerow([x[i], y[i]])", "def create_csv(self):\n try:\n # Convert List of Lists to DataFrame and write it to a CSV\n pd.DataFrame(self.data, columns=self.header) \\\n .to_csv(os.path.join(self.file_path, self.file_name), index=False)\n self.successful_run = True\n except:\n # TODO create Exception Handling\n raise", "def save_csv(outputfile):\n with open(outputfile, 'w', newline='') as outfile:\n writer = csv.writer(outfile)\n writer.writerow(DATA_KEYS)\n\n # Add data to csv-file\n for data in data_list:\n writer.writerow(data)", "def export_data(fp, app_name):\n from otree.views.admin import get_display_table_rows\n colnames, rows = get_display_table_rows(\n app_name, for_export=True, subsession_pk=None)\n colnames = ['{}.{}'.format(k, v) for k, v in colnames]\n writer = csv.writer(fp)\n writer.writerows([colnames])\n writer.writerows(rows)", "def exportList(self, list_id):\n params = {'LIST_ID' : list_id,\n 'EXPORT_TYPE' : 'ALL',\n 'EXPORT_FORMAT': 'CSV',\n 'FILE_ENCODING': 'utf-8'}\n xrequest = xml_str(self.buildRequestEtree('ExportList', params))\n xresults = self.request(xrequest)\n xpath = '/Envelope/Body/RESULT/FILE_PATH'\n return xresults.xpath(xpath)[0].text", "def write_table_to_csv(table: List[List], filename: str):\n with open(filename, 'w') as csvfile:\n writer = csv.writer(csvfile, delimiter='\\t')\n for row in table:\n writer.writerow(row)", "def write_to_csv(list_of_rows, file_name):\n with open(file_name, 'w') as f:\n writer = csv.writer(f)\n for row in list_of_rows:\n if None in row:\n continue\n writer.writerow(row)\n \n f.close()", "def writeCSV():\n final_list = get_final_list()\n path_to_csv_File = 'system_metrics.csv'\n\n csv_file = open(path_to_csv_File, 'w+', newline='', encoding=\"utf8\")\n csv_file_writer = csv.writer(csv_file, delimiter=',')\n\n csv_file_writer.writerow(['Subscription', 'Resource', 'MetricType',\n 'Timestamp', 'Unit', 'Minimum', 'Maximum', 'Average'])\n\n for item in final_list:\n csv_file_writer.writerow([item['subscription'], item['resource'], item['metricType'], item['timestamp'],\n item['unit'], item['minimum'], item['maximum'], 
item['average']])\n\n print('Output written successfully!!')", "def write_list(outputfilename, list):\r\n try:\r\n with open(outputfilename, 'w', newline='', encoding='utf-8') as outfile:\r\n itemwriter = csv.writer(outfile, delimiter=\",\")\r\n for item in list:\r\n itemwriter.writerow(item)\r\n except:\r\n input(\"File still open! Please close and press enter to continue\")\r\n with open(outputfilename, 'w', newline='', encoding='utf-8') as outfile:\r\n itemwriter = csv.writer(outfile, delimiter=\",\")\r\n for item in list:\r\n itemwriter.writerow(item)", "def save_to_file_csv(cls, list_objs):\n ld = []\n with open(cls.__name__ + \".csv\", \"w\", encoding=\"utf-8\") as f:\n if list_objs:\n for obj in list_objs:\n if cls.__name__ == 'Rectangle':\n ld.append([\n obj.id, obj.width, obj.height, obj.x, obj.y])\n if cls.__name__ == 'Square':\n ld.append([obj.id, obj.size, obj.x, obj.y])\n writer = csv.writer(f)\n for row in ld:\n writer.writerow(row)", "def csv_output(self):\r\n fh = open(\"output.csv\",'w')\r\n for i in range(len(self.population.columns)):\r\n if i != len(self.population.columns)-1:\r\n fh.write(str(self.population.columns[i]))\r\n fh.write(\",\")\r\n else:\r\n fh.write(str(self.population.columns[i]))\r\n fh.write(\"\\n\")\r\n\r\n for i in range(len(self.population.data)):\r\n for j in range(len(self.population.data[i])):\r\n if j != len(self.population.data[i])-1:\r\n fh.write(str(self.population.data[i][j]))\r\n fh.write(\",\")\r\n else:\r\n fh.write(str(self.population.data[i][j]))\r\n fh.write(\"\\n\")\r\n fh.close()", "def save_to_file_csv(cls, list_objs):\n f_name = cls.__name__ + \".csv\"\n with open(f_name, 'w', newline='') as f:\n if list_objs is None or list_objs == []:\n f.write(\"[]\")\n\n else:\n if cls.__name__ == 'Rectangle':\n h = ['id', 'width', 'height', 'x', 'y']\n else:\n h = ['id', 'size', 'x', 'y']\n ncsv = csv.DictWriter(f, fieldnames=h)\n for obj in list_objs:\n ncsv.writerow(obj.to_dictionary())", "def export_player_list_csv():\n # get players belonging to a team\n players = Player.objects.select_related(\n 'team', 'team__manager'\n ).order_by(\n '-value', 'code'\n )\n\n with open('./data/player_list_auction.csv', 'w') as f:\n writer = csv.writer(f)\n\n for position in Player.POSITION:\n\n writer.writerow([\n 'Code',\t'Name',\t'Team',\t'Value', 'Pts', 'Manager', 'Manager Nominations'\n ])\n\n for p in players.filter(position=position[0]):\n writer.writerow([\n p.code,\n p.name,\n p.prem_team.code,\n p.value,\n p.last_years_total,\n p.team.manager.username if p.team else '',\n p.auction_nomination_managers\n ])", "def user_list_csv():\n us = user.User.query.all()\n filename = 'xxx.csv'\n csv_name = _rename_file(filename)\n url = app.config['CSV_FILES_DEST'] + '/' + csv_name\n with codecs.open(url, 'wb') as csvfile:\n #fieldnames = ['账号', '姓名', '描述', '角色', '邮箱', '电话', '工作电话', '公司', '部门', '职位']\n fieldnames = []\n if len(us) > 0:\n fieldnames = us[0].to_csv_dict().keys()\n writer = unicodecsv.writer(csvfile, encoding='utf-8-sig')\n writer.writerow(fieldnames)\n for u in us:\n dct = u.to_csv_dict()\n n_items = {}\n for name in fieldnames:\n if dct[name] is not None:\n n_items[name] = dct[name]\n else:\n n_items[name] = ''\n writer.writerow(n_items.values())\n return send_file(url)", "def output_to_file(utilist, filepath=\"demo.csv\"):\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n with open(filepath, \"a\") as f:\n f.write(utilist + \"\\n\")", "def writetoCSV(self, fileName):\n\n with open(fileName, 'w') as writeFile:\n 
writeFile.write(\"ID,Fx,Fy,Fz\\n\")\n for fstnr in F:\n writeFile.write(str(fstnr.ID))\n for i in fstnr.force:\n writeFile.write(',' + str(i))\n writeFile.write('\\n')", "def save_to_file_csv(cls, list_objs):\n with open(cls.__name__ + \".csv\", \"w\", newline='') as f:\n if cls.__name__ == \"Rectangle\":\n fieldnames = ['id', 'width', 'height', 'x', 'y']\n elif cls.__name__ == \"Square\":\n fieldnames = ['id', 'size', 'x', 'y']\n writer = csv.DictWriter(f, fieldnames=fieldnames)\n writer.writeheader()\n if list_objs is not None:\n for model in list_objs:\n writer.writerow(model.to_dictionary())", "def write_csv(self, stock_list):\n\n with open(self.outfile, 'w') as outfile:\n writer = csv.writer(outfile, delimiter=',',\n quoting=csv.QUOTE_MINIMAL)\n for symbol, values in stock_list.items():\n # Need to find a better way to handle this...\n writer.writerow([values['symbol'], values['name']])", "def generate_csv(table, header):\n with open(\"%s.csv\" % header, \"w\") as csvfile:\n for i in range(len(table)):\n for j in range(len(table[i])):\n if j != len(table[i])-1:\n tmp = table[i][j] + \",\"\n else:\n tmp = table[i][j] + \"\\n\"\n csvfile.write(tmp)", "def save_items_to_csv(items_data: pd.DataFrame):\n with open('etsy_items.csv', 'w') as f:\n writer = csv.writer(f)\n writer.writerows(items_data)", "def dump_list2csv_hdfs(sc, content_list, file_path):\n content_str = ''\n for i in range(len(content_list)):\n i_content = ','.join([str(col) for col in content_list[i]]) + '\\n'\n content_str += i_content\n write_to_hdfs(sc, file_path, content_str, overwrite=True)", "def save_to_file_csv(cls, list_objs):\n list_rectangle = [\"id\", \"width\", \"height\", \"x\", \"y\"]\n list_square = [\"id\", \"size\", \"x\", \"y\"]\n filename = cls.__name__ + \".csv\"\n result = []\n\n if list_objs:\n for objs in list_objs:\n # First recollect the info of the object with a dict\n dictionary = objs.to_dictionary()\n middle_result = []\n # Second obtein the values in a ordered class list\n if cls.__name__ == \"Rectangle\":\n for item in list_rectangle:\n middle_result.append(dictionary[item])\n if cls.__name__ == \"Square\":\n for item in list_square:\n middle_result.append(dictionary[item])\n # append the list to result list\n result.append(middle_result)\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n writer = csv.writer(file)\n writer.writerows(result)", "def save_to_file_csv(cls, list_objs):\n list_dictionaries = []\n if list_objs is None or list_objs == []:\n string_dictionary = \"[]\"\n else:\n for _obj_dict in list_objs:\n list_dictionaries.append(_obj_dict.to_dictionary())\n string_dictionary = Base.to_json_string(list_dictionaries)\n with open(cls.__name__ + \".csv\", \"w\") as _file:\n _file.write(string_dictionary)\n _file.close()", "def export_to_csv(self, file_name):\n \n with open(file_name, 'w', newline='') as csvDataFile:\n csvWriter = csv.writer(csvDataFile, delimiter = ',')\n\n for i in range(0,self.sample_num):\n data = list()\n data.append(self.sample[i].simulation_name)\n data.append(self.sample[i].result_name)\n data.extend(self.sample[i].parameters.tolist())\n data.extend(self.sample[i].result) \n csvWriter.writerow(data)", "def write_csv(self, filelike):\r\n items = self.rows()\r\n writer = unicodecsv.writer(filelike, encoding=\"utf-8\")\r\n writer.writerow(self.header())\r\n for item in items:\r\n writer.writerow(item)", "def ExportAsCSV(csv_out_path, data):\n with open(csv_out_path, \"w\", newline=\"\") as f:\n writer = csv.writer(f, delimiter=',')\n 
writer.writerows(data)\n f.close()\n return", "def write_output_file(ad_models):\n\n with open('output-data-utf8.csv', 'w', newline='', encoding='UTF-8') as output_file:\n csv_writer = csv.writer(output_file, delimiter=',')\n for ad in ad_models:\n csv_writer.writerow((ad.date.strftime('%Y/%m/%d'), ad.country_code, ad.impression, ad.clicks))", "def column_output(self, output_fname, list_2d):\n\t\toutf = open(output_fname, 'w')\n\t\twriter = csv.writer(outf, delimiter='\\t')\n\t\tfor list in list_2d:\n\t\t\twriter.writerow(list)\n\t\tdel writer\n\t\toutf.close()", "def generate_csv(self, output_file):\n try: # We are going to \"try\" something\n csv_file = open(output_file, 'w+') # open \"output_file\" as a writable file and return a handle called \"csv_file\"\n except OSError as err: # If something goes wrong with the open, we catch the exception\n fatal(\"{0}\".format(err), -1) # exit with something other than 0 so the shell knows something went wrong\n \n writer = csv.writer(csv_file) # create a CSV writing object that's pointing at our open file handle\n\n writer.writerow([\"Question\",\"Answers\"]) # Let's write the top row\n for k in self.questions.keys(): # Let's walk down the directory by key\n # write the \"key\" (which is the question) and then let's take the list of answers and create a comma delmited list.\n # this is likely totally wrong since you could have an answer in it that also has a comma...\n writer.writerow([k, \",\".join(self.questions[k].answers)]) # insert a key (which is the question) and then let's take the array of \n\n csv_file.close() # close the csv_file file handle", "def labels2csv(labels, csv_path):\n with open(csv_path, \"w\") as file:\n file.write(\"id,label\\n\")\n for i, label in enumerate(labels):\n file.write(\"{},{}\\n\".format(i, label))", "def to_csv(header, rows):\r\n with open('result.csv', 'w') as result:\r\n result_writer = csv.writer(result, delimiter=';')\r\n result_writer.writerow(header)\r\n result_writer.writerows(rows)", "def write_to_file(self, results):\n with open(self.outputFilename, \"w\") as csvFile:\n csvWriter = csv.writer(csvFile, delimiter=',') \n title_row = ('asset_id', 'component_id', 'latitude', 'longitude', 'installation_date', 'commissioning_date', 'street_name', 'cabinet_id', 'nominal_wattage', 'current_time', 'current_LogValue', 'current_IsLogValueOff') \n csvWriter.writerow(title_row)\n for record in results:\n csvWriter.writerow(record)", "def add_to_csv(file_name, single_list):\n final_list = read_csv(file_name)\n writer = csv.writer(open(file_name, 'wb'), delimiter=',',quoting=csv.QUOTE_MINIMAL)\n final_list.append(single_list)\n for x in final_list:\n writer.writerow(x)", "def write_csv(self, key_list, word_list):\n # Write out data\n out_data = []\n # Match filtered indexes to words\n for i in key_list.index:\n subset = word_list[word_list['key'] == i]\n # Add to aggregate list\n out_data.append(subset['word'].tolist())\n # Dump list to headerless CSV\n with open(self.output, 'w') as f:\n writer = csv.writer(f)\n writer.writerows(out_data)\n return len(out_data)", "def csv_writelist(file, oldfile, chlst, num):\n import csv\n writelist = checkdifferences(oldfile, chlst, num)\n print('before', writelist)\n with open('{}.csv'.format(file), 'w', newline='') as csvwrite:\n writer = csv.writer(csvwrite, delimiter=';')\n try:\n for eachrow in writelist:\n writer.writerow(eachrow)\n except:\n if TypeError:\n print('Typeerror')\n csvwrite.close()", "def export_users(_request):\n query = 
models.UserProfile.all().order('email')\n rows = []\n for user in query:\n is_superuser = 0\n if user.is_superuser:\n is_superuser = 1\n rows.append('%s,%s\\n' % (user.email, is_superuser))\n\n response = http.HttpResponse(''.join(rows), mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=users.csv'\n return response", "def export_csv(user, tasks):\n employee_name = user[0]['name']\n employee_id = user[0]['id']\n csvfile = '{}.csv'.format(employee_id)\n with open(csvfile, mode='w') as file:\n towrite = csv.writer(file, delimiter=',', quoting=csv.QUOTE_ALL)\n for task in tasks:\n towrite.writerow([employee_id, employee_name,\n task['completed'], task['title']])", "def save_to_file_csv(cls, list_objs):\n r_fields = ['id', 'width', 'height', 'x', 'y']\n s_fields = ['id', 'size', 'x', 'y']\n filename = cls.__name__ + \".csv\"\n new_list = []\n with open(filename, \"w\") as fp:\n if cls.__name__ == \"Rectangle\":\n dict_writer = csv.DictWriter(fp, fieldnames=r_fields)\n elif cls.__name__ == \"Square\":\n dict_writer = csv.DictWriter(fp, fieldnames=s_fields)\n dict_writer.writeheader()\n for objs in list_objs:\n dict_writer.writerow(objs.to_dictionary())", "def guarda_archivos_csv(lista_archivos, nom_arch):\n da = open(nom_arch, \"w\")\n csv_writer = csv.writer(da)\n for arch in lista_archivos:\n fila = [arch[\"nombre\"], arch[\"ext\"], arch[\"peso\"], arch[\"fecha\"]]\n csv_writer.writerow(fila)\n da.close()", "def save_csv(outfile, cities):\n writer = csv.writer(outfile)\n writer.writerow(['Name'])\n for row in cities:\n writer.writerow([row])", "def _write_csv(self):\n\n # add the label to the header\n if self.input_data.get_value(InputType.TIME_PERIOD) == 'all':\n self.header.append('Date')\n else:\n self.header.append('sample id')\n\n key_list = []\n\n for i, cube in enumerate(self.cube_list):\n if self.input_data.get_value(InputType.TIME_PERIOD) == 'all':\n self._write_sample_with_date(cube, i, key_list)\n else:\n self._write_sample(cube, i, key_list)\n\n output_data_file_path = self._get_full_file_name()\n self._write_data_dict(output_data_file_path, key_list)\n\n return [output_data_file_path]", "def write(l, path, columns):\n \n file = open(path, 'w', newline = '', encoding = 'utf-8')\n writer = csv.writer(file, delimiter = '\\t', quotechar = '', quoting = csv.QUOTE_NONE)\n row = []\n for col in columns:\n row.append(col)\n writer.writerow(row)\n for entry in l:\n row = []\n for col in columns:\n row.append(entry[col])\n writer.writerow(row)\n file.close()", "def write_csv_file (metadata_list, csv_file, append) :\n try :\n with open (csv_file, 'a' if append else 'w' , newline='') as file :\n writer = csv.DictWriter(file, fieldnames=MetadataEntity.get_fieldnames())\n if not append: writer.writeheader()\n for e in metadata_list :\n writer.writerow(e.get_values())\n file.close()\n except :\n print ('ERROR: writing csv file: ' + csv_file)\n return False\n return True", "def makeCSV(self,file_name, data, topList):\n file_name = file_name+\".csv\"\n w = csv.writer(open(file_name, \"w\"))\n w.writerow(topList)\n for key, val in data.items():\n row = list(val)\n row.insert(0,key)\n w.writerow(row)", "def to_csv(self, path):\n for table in ['datasets', 'dataruns', 'hyperpartitions', 'classifiers']:\n df = pd.read_sql('SELECT * FROM %s' % table, self.session.bind)\n df.to_csv(os.path.join(path, '%s.csv' % table), index=False)", "def to_csv_file_obj(self, rows):\n output = StringIO.StringIO()\n writer = csv.writer(output)\n writer.writerows(rows)\n return output", "def 
save_csv(self, filename): # DONE\n self.data.to_csv(filename)", "def write_csv(self, outfile, collapse_orders=False, show_age=False):\r\n # Write header row\r\n outfile.write(self.get_csv_header(collapse_orders, show_age).encode())\r\n\r\n # Write content\r\n for x in self.records:\r\n x.write_csv(outfile, collapse_orders, show_age)", "def export_csv(self):\n outputfile = tkinter.filedialog.asksaveasfilename(\n defaultextension=\".csv\",\n filetypes=((\"comma seperated values\", \"*.csv\"),\n (\"All Files\", \"*.*\")))\n if outputfile:\n tabledata = self.tabs.window.aistracker.create_table_data()\n export.write_csv_file(tabledata, outputfile)\n else:\n raise ExportAborted('Export cancelled by user.')", "def to_csv(self, csvwriter):\n csvwriter.writerow(self.to_csv_row())", "def save_class_list():\r\n try:\r\n classStringList.clear() #clear the classString List\r\n for i in range(0,len(classes)):\r\n classStringList.append(classes[i].csvRow()) #enter classes to the classStringList from the classes\r\n f = open(\"mySchedule.csv\", 'w', newline ='')\r\n csv.writer(f).writerow([\"Day\", \"Class\", \"Start Time\", \"End Time\"])\r\n for classCSVString in classStringList:\r\n csv.writer(f).writerow(classCSVString)\r\n f.close()\r\n except Exception as e:\r\n print(\"Exception found:\" + e)", "def write_csv(file_names: list, csv_file_path: str):\n with open(csv_file_path, mode='w') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerows(enumerate(file_names))", "def csv_writer(data, path):\n\twith open(path, \"wb\") as csv_file:\n\t\twriter= csv.writer(csv_file, delimiter=',')\n\t\twriter.writerows(data)", "def writeCSV(filename, separator, data):\n \n filetowrite = open(filename, \"w\")\n values = []\n i = 0 #Count the number of objects already written\n for item in data:\n filetowrite.write(item)\n i += 1\n if i < len(data.keys()):\n filetowrite.write(separator)\n values.append(data[item])\n filetowrite.write(\"\\n\")\n i = 0\n for value in values:\n filetowrite.write(str(value))\n i += 1\n if i < len(values):\n filetowrite.write(separator)\n \n filetowrite.close()", "def createFileCSV(table, path=\"./prediction\"):\t\n\tif len(table) < 1:\n\t\traise NameError('Empty Table!')\n\telse:\n\t\tfile = open(path + '.csv', 'w+')\n\n\t\tfile.write(table[0].toStringHeaders() + \"\\n\")\n\n\t\tfor row in table:\n\t\t\tfile.write(row.toStringCSV() + '\\n')\n\t\tfile.close()", "def save_csv(companies):\n print(\"Saving companies.csv...\")\n\n Path(\"output\").mkdir(parents=True, exist_ok=True)\n file_name = 'output/companies.csv'\n\n with open(file_name, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=',')\n i = 0\n while i < 500:\n company = companies[i]\n name = company.text\n url = company.get_attribute('href')\n writer.writerow([name, url])\n i = i + 1\n \n print('companies.csv created')", "def each_to_csv(data, key, value):\n data.to_csv(\"camelot/clean/nrld_{}_{}.csv\".format(key, value), index=False)\n return data", "def writeToMonthCsv(news_dict):\n\n for k in news_dict:\n output_f = open(k + \".csv\", \"wb\")\n writer = csv.writer(output_f)\n writer.writerow([news_dict[k].replace(\",\", \"\").encode(\"utf-8\")])\n output_f.close()", "def convert2csv(contacts, output_path):\n\n print(\"[!] 
not implemented yet\")", "def csv(self, destination_path):\n # todo - test for single and duplicate base cases\n to_csv(self._axl_data, destination_path)", "def generate_csv(inf, outf):\n o = csv.writer(outf)\n o.writerow(COLUMNS)\n for row in reformat_data(inf):\n o.writerow([inf.name] + row)", "def CSVWriter (iterable, outLoc, header=\"\", ):\n if not iterable:\n print (\"nothing to write\")\n return 0\n\n out = open(outLoc, 'w')\n\n if header:\n out.write(header+'\\n')\n\n #Only works if iterable is a nested list\n for member in iterable:\n for item in member:\n out.write(str(item)+',')\n out.write('\\n')\n\n print(\"write to \"+outLoc+\" successful.\")\n return 1", "def export_as_csv(modeladmin, request, queryset):\n opts = modeladmin.model._meta\n field_names = [field.name for field in opts.fields]\n labels = []\n\n if exclude:\n field_names = [f for f in field_names if f not in exclude]\n\n elif fields:\n field_names = [field for field, _ in fields]\n labels = [label for _, label in fields]\n\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=%s.csv' % (\n str(opts).replace('.', '_')\n )\n\n writer = csv.writer(response)\n\n if header:\n writer.writerow(labels if labels else field_names)\n\n for obj in queryset:\n writer.writerow([prep_field(request, obj, field, manyToManySep) for field in field_names])\n return response", "def export_csv(self, csvfileobject):\n for index, track in enumerate(self._tracks):\n csvfileobject.writerow(track.properties)\n for delta in track.periods: \n csvfileobject.writerow(delta.properties)", "def sites_csv():\n import io\n import csv\n\n dest = io.StringIO()\n dest.write('\\ufeff')\n writer = csv.writer(dest, quoting=csv.QUOTE_MINIMAL)\n\n with Config() as config:\n with db.Connection(config) as con:\n writer.writerow(con.fieldnames)\n writer.writerows(con.read_entries())\n\n output = flask.make_response(dest.getvalue())\n output.headers[\"Content-Disposition\"] = \"attachment; filename=spatialcitizenscience.csv\"\n output.headers[\"Content-type\"] = \"text/csv\"\n return output", "def save_server_list_csv(server_list):\n global most_fields\n\n # Get 'key/server_name' with most 'columns'\n key = list(most_fields.keys())[-1]\n # Get record from server_list to provide headers\n srv = server_list[str(key)]\n # Create headers for CSV\n headers = list(srv.keys())\n\n with open('new-servers.csv', 'w', newline='') as of:\n writer = csv.writer(of)\n writer.writerow(headers)\n\n for server in server_list.values():\n server_values = list(server.values()) # Y\n writer.writerow(map(str, server_values))", "def export_corpus_csv(corpus,path, delimiter = ',', trans_delimiter = '.'):\n word = corpus.random_word()\n header = sorted(word.descriptors)\n with open(path, encoding='utf-8', mode='w') as f:\n print(delimiter.join(header), file=f)\n for key in corpus.iter_sort():\n print(delimiter.join(make_safe(getattr(key, value),trans_delimiter) for value in header), file=f)", "def write_data_to_csv(cell_cent_top_lst, u_top_fe_conv_lst, disp_cent_PD_array_lst, u_disp_PD_array_lst, file_path, file_name):\n import csv\n def _write_(abs_file_path, arr):\n with open(abs_file_path, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=';')\n writer.writerows(arr)\n\n\n num_data = len(cell_cent_top_lst)\n for i in range(num_data):\n cel_cnt_file_name = path.join(file_path, file_name + str(i).zfill(2) + '_cel_cnt_top.csv')\n ufe_file_name = path.join(file_path, file_name + str(i).zfill(2) + '_ufe_top.csv')\n 
dsc_file_name = path.join(file_path, file_name + str(i).zfill(2) + '_dsp_cnt.csv')\n u_dsp_file_name = path.join(file_path, file_name + str(i).zfill(2) + '_u_dsp.csv')\n\n _write_(cel_cnt_file_name, cell_cent_top_lst[i])\n _write_(ufe_file_name, u_top_fe_conv_lst[i])\n _write_(dsc_file_name, disp_cent_PD_array_lst[i])\n _write_(u_dsp_file_name, u_disp_PD_array_lst[i])\n\n return", "def write_the_contents_to_the_same_file(self):\n if not len(self.student_list):\n print('There is no contents to write')\n return\n\n if self._filename is None:\n self._filename = self.input_filename()\n\n with open(self._filename, 'w') as OUT:\n OUT.write(self.student_list.to_csv(date_format='%Y-%m-%d',\n sep='\\t', header=False, columns=self.columns_to_save))\n print(f'Data are saved into {self._filename!r}')", "def output(items, headers, outputFile):\n\tdictToValues = lambda d: \\\n\t\tmap(lambda h: d.get(h, ''), headers)\n\n\treturn writeCsv(outputFile, map(dictToValues, items))", "def save_double_list(list1, list2, filename):\r\n the_file = open(filename, \"wb\")\r\n try:\r\n writer = csv.writer(the_file)\r\n if len(list1)!=len(list2):\r\n raise Exception(\"Saving a double list : The list have not the same length !\")\r\n for i in range(len(list1)):\r\n writer.writerow( (list1[i], list2[i]) ) \r\n finally:\r\n the_file.close()", "def output_csv_list_of_rows(csv_list_of_rows, output_file):\n sys.stdout = open(output_file, 'w')\n row_count = 0\n for row in csv_list_of_rows:\n item_count = 0\n for item in row:\n if item_count != 0:\n sys.stdout.write(',')\n sys.stdout.write(str(item))\n item_count += 1\n row_count += 1\n if row_count != len(csv_list_of_rows):\n sys.stdout.write('\\n')" ]
[ "0.7663665", "0.75958973", "0.74424946", "0.7424865", "0.7408895", "0.7338322", "0.7334891", "0.73170066", "0.72265005", "0.71845925", "0.7141257", "0.7123623", "0.71039116", "0.70734674", "0.70368683", "0.7021018", "0.6982247", "0.6972423", "0.6959962", "0.69387776", "0.693792", "0.69227654", "0.6909969", "0.6879819", "0.6867224", "0.68456227", "0.68252164", "0.6817719", "0.67889977", "0.6788919", "0.6786127", "0.67746115", "0.6762141", "0.67589706", "0.67577434", "0.67574704", "0.675007", "0.67363876", "0.67357016", "0.6718727", "0.671644", "0.6713166", "0.6703447", "0.66873175", "0.6679424", "0.66742724", "0.6651383", "0.6650287", "0.6650083", "0.6649245", "0.66155773", "0.6614225", "0.66085", "0.66004515", "0.6585972", "0.6584399", "0.65810955", "0.65745723", "0.6558691", "0.65469855", "0.65438634", "0.65402555", "0.65392286", "0.6539204", "0.6535071", "0.65319324", "0.65293473", "0.65245724", "0.6521923", "0.6518339", "0.65170574", "0.6515541", "0.65097046", "0.6489435", "0.6478812", "0.6468314", "0.6466343", "0.64532363", "0.64526683", "0.64437765", "0.64404196", "0.6438559", "0.64379305", "0.6426649", "0.6416162", "0.641198", "0.64000857", "0.6397854", "0.6396809", "0.63891804", "0.6388005", "0.6387297", "0.6385894", "0.63837445", "0.63799536", "0.6376514", "0.6369265", "0.63688254", "0.6363627", "0.6362959" ]
0.6904354
23
Write array to a file as text or binary (default).
def quick_save_array(data, file_name, delimiter=',', ): data.tofile(file_name, sep=delimiter)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_txt(data, file_path):\n array = sanitise_array(data)\n\n # If the data is floating then format the values in scientific notation.\n if np.issubdtype(array.dtype, np.floating):\n array = array.astype(np.float32)\n formatter = lambda x: f'{x:.12E}'\n elif np.issubdtype(array.dtype, np.integer):\n array = array.astype(np.int32)\n formatter = lambda x: str(x)\n else:\n raise TypeError(f'Type of the data could not be serialised - {array.dtype}')\n\n lines = [' '.join(formatter(val) for val in row) + '\\n' for row in array]\n with open(file_path, 'w') as f:\n f.writelines(lines)", "def write_file(self):\n print 'Writing '+self.name+' binary...'\n if self.vals is not None:\n if len(self.vals) == self.size:\n stream = self.pack_mem()\n with open(self.name+'.bin','wb') as f:\n f.write(stream)\n print 'File written: '+self.name+'.bin'\n else:\n print 'Error: input array for '+self.name+'is not the right '+\\\n 'size (should be '+str(self.size)+'). Skipping.'\n else:\n print 'No array provided, skipping.'", "def print_to_file(arr, fid, sep=\"\", format=\"%s\"):\n\n f = array_create.array(arr, bohrium=False)\n return f.tofile(fid, sep=sep, format=format)", "def ArraytoFile(_array):\n\tfile = open('sort1.txt', 'w')\n\tfor line in _array:\n\t\tfile.write(line+\"\\n\")\n\tfile.close()", "def save_array(array, filename):\n np.save(filename, array)", "def write_output(arr, filename):\n print('Started writing the output..')\n f = open(filename, 'w')\n for a in arr:\n f.write(str(a) + '\\n')\n f.close()\n print('Done!, Open the file to see the approved loans.')", "def write_text_file(data, file_name):\n\timport types\n\toutf = open(file_name, \"w\")\n\tif (type(data[0]) == types.ListType):\n\t\t# It is a list of lists\n\t\tfor i in xrange(len(data[0])):\n\t\t\tfor j in xrange(len(data)):\n\t\t\t\tif type(data[j][i]) == type(0):\n\t\t\t\t\toutf.write(\" %12d\"%data[j][i])\n\t\t\t\telse:\n\t\t\t\t\toutf.write(\" %12.5g\"%data[j][i])\n\t\t\toutf.write(\"\\n\")\n\telse:\n\t\t# Single list\n\t\tfor j in xrange(len(data)):\n\t\t\tif type(data[j]) == type(0):\n\t\t\t\toutf.write(\" %12d\\n\"%data[j])\n\t\t\telse:\n\t\t\t\toutf.write(\" %12.5g\\n\"%data[j])\n\toutf.close()", "def cast_numpy_to_txt(arr, output_file):\n shape = arr.shape\n arr = arr.reshape([shape[0] * shape[1], shape[2]])\n\n np.savetxt(fname=output_file, X=arr, delimiter=' ', fmt='%.18e', newline='\\n', )", "def writeArray(fname,arr):\n fh = open(fname,'w')\n fh.write('%d\\n' % arr.shape[0])\n fh.write('%d\\n' % arr.shape[1])\n for x in range(arr.shape[0]):\n for y in range(arr.shape[1]):\n if arr.dtype == np.complex:\n fh.write('%.7e %.7e\\n' % (arr[x,y].real, arr[x,y].imag))\n else:\n fh.write('%.7e\\n' % (arr[x,y]))\n fh.close()", "def write(fname, data):\n # Encode to string.\n encoder = NumpyJSONEncoder(check_circular=True, indent=' ')\n serial = encoder.encode(data)\n\n # Write to file.\n with open(fname, 'w') as fo:\n fo.write(serial)", "def write_csv_file(array, filename):\n\tnp.savetxt(filename, array, delimiter=\",\")", "def write_to_txt(data, filename, attr='w'):\n f = open(filename, attr, encoding='utf-8', errors='ignore')\n for item in data:\n f.write(item.__str__())\n f.close()", "def to_txt(self, fpath):\n np.savetxt(fpath, self._arr.T)", "def save(file, arr, allow_pickle=True, fix_imports=True):\n\n return numpy.save(file, array_create.array(arr, bohrium=False), allow_pickle, fix_imports)", "def binary_out(array, fnam, dt=np.dtype(np.float64), endianness='big', appendDim=False):\r\n if appendDim == True :\r\n fnam_out = 
fnam + '_'\r\n for i in array.shape[:-1] :\r\n fnam_out += str(i) + 'x' \r\n fnam_out += str(array.shape[-1]) + '.raw'\r\n else :\r\n fnam_out = fnam\r\n arrayout = np.array(array, dtype=dt)\r\n if sys.byteorder != endianness:\r\n arrayout.byteswap(True)\r\n arrayout.tofile(os.path.abspath(fnam_out))", "def write_txt(data, out_path, type=\"w\"):\n with open(out_path, type) as f:\n f.write(data.encode(\"utf-8\"))", "def writeList2File(filename, array, overwrite=False, separator=';'):\n mode = 'a'\n if overwrite:\n mode = 'w'\n file = open(filename, mode)\n file.write(separator.join(map(str,array)) + '\\n')", "def numpy_2_file(narray, file, path=OUTPUT_PATH, sep=',' ):\n file_path = path + file\n narrayc = numpy.copy(narray)\n numpy.place(narrayc,numpy.logical_or(narrayc==-1,narrayc==-2), 2)\n dataset = numpy.copy(narrayc).astype(str)\n numpy.place(dataset,dataset=='2', '*')\n d=numpy.atleast_2d(dataset)\n numpy.savetxt(file_path, d, delimiter=sep, fmt='%s')\n return", "def _write_array_on_file(self, pa_array):\n pa_batch = pa.RecordBatch.from_struct_array(pa_array)\n self._num_bytes += pa_array.nbytes\n self.pa_writer.write_batch(pa_batch)", "def output_file(newarray, filename):\n np.savetxt(filename + \"_formatted.txt\", newarray, delimiter=\" \", fmt=\"%s\")", "def write(data):", "def _serialize_array(self, array):\n buffer = io.BytesIO()\n np.save(buffer, array)\n return buffer.getvalue()", "def save(data, file, compression=0):\n f = file if isinstance(file, bob.io.base.HDF5File) else bob.io.base.HDF5File(file, 'w')\n if hasattr(data, 'save'):\n data.save(f)\n else:\n f.set(\"array\", data, compression=compression)", "def data_to_file(data, ta_file):\n file_handle = file(ta_file, \"w\")\n file_handle.write(data_to_string(data))\n file_handle.close()", "def pickle(array, file):\r\n\timport cPickle\r\n\tfo = open(file,'wb')\r\n\tcPickle.dump(array,fo)\r\n\tfo.close()", "def create_output_file(arr):\r\n for i in arr:\r\n output_file.write(f'{i[0]}\\t{i[1]}\\n')", "def save_array(self, name: str, array: np.ndarray):\r\n np.savetxt(self._path_for_csv(name), array, delimiter=\",\")", "def save_bin(data, file_path):\n np.save(file_path, data)", "def write( data ):", "def write_to_file(filepath, data):\n\n with open(filepath, 'w') as f:\n f.write(str(data))", "def save_to_file(samps, filename, save_as_numpy):\n with open(filename, 'wb') as out_file:\n if save_as_numpy:\n np.save(out_file, samps, allow_pickle=False, fix_imports=False)\n else:\n samps.tofile(out_file)", "def _write_txt(\n output_path, records\n):\n output_path.write_text(_records_to_string(records))", "def _save_binary(file_name, data):\n with open(file_name, \"wb\") as f:\n cp.dump(data, f)", "def bin_writer(fpath, fname, data):\n path = fpath + fname + '.dat'\n with open(path, 'ab') as file:\n for row in data:\n file.write(row.encode('utf-8'))\n return None", "def write_to_file(file: Text, data: bytes):\n with open(file, \"wb\") as w:\n w.write(data)\n w.flush()", "def write_to_file(info: List[str]) -> None:\n return", "def write_array(self, array_name, fam=None, overwrite=False, **kwargs):\n\n # Determine whether this is a write or an update\n if fam is None:\n fam = self.families()\n\n # It's an update if we're not fully replacing the file on\n # disk, i.e. 
there exists a family f in self.families() but\n # not in fam for which array_name is loadable\n is_update = any([array_name in self[\n f].loadable_keys() and f not in fam for f in self.families()])\n\n if not hasattr(self, \"_write_array\"):\n raise OSError(\n \"The underlying file format class does not support writing individual arrays back to disk\")\n\n if is_update and not hasattr(self, \"_update_array\"):\n raise OSError(\n \"The underlying file format class does not support updating arrays on disk\")\n\n # It's an overwrite if we're writing over something loadable\n is_overwriting = any([array_name in self[\n f].loadable_keys() for f in fam])\n\n if is_overwriting and not overwrite:\n # User didn't specifically say overwriting is OK\n raise OSError(\n \"This operation would overwrite existing data on disk. Call again setting overwrite=True if you want to enable this behaviour.\")\n\n if is_update:\n self._update_array(array_name, fam=fam, **kwargs)\n else:\n self._write_array(self, array_name, fam=fam, **kwargs)", "def write(cls, file, data):\n file.write(data)", "def _saveBinaryData(self, file, with_axis=None):\n if with_axis is not None:\n data = self._data_with_axis(with_axis)\n numpy.save(file, data)\n else:\n numpy.save(file, self.data)", "def array_to_file(filename, a):\n a = normalize_array(a)\n i = Image.fromarray(a.astype('uint8'))\n return i.save(filename)", "def write_text(file_path, text):\n # Check if file ends with txt\n if not file_path.endswith('.txt'):\n raise IllegalArgumentError(f\"{file_path} needs to have a .txt extension\")\n\n # Write file\n with open(file_path, 'w') as file:\n if isinstance(text, str):\n file.write(text)\n elif isinstance(text, list):\n file.writelines(text)\n else:\n raise IllegalArgumentError(\"text variable is not a string or list of strings\")\n\n return True", "def save_data(data,file):\n\n f = open(file, mode='w',encoding='utf-8', buffering=1024)\n for t in data:\n f.write(str(t[0]) + ', ' + str(t[1]) + '\\n')\n f.close()", "def write(self, bytes_array):\n self.__str += bytes_array.decode(\"utf-8\")", "def save_data(data, filename='data.txt'):\n with open(filename, 'w', encoding=\"utf-8\") as file:\n for item in data:\n print(item, file=file)", "def write_binary(self, path):\n return", "def savefile(filename, data):\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n output = dumps(data, ensure_ascii=False, sort_keys=True, indent=2)\n file.write(output)", "def write(self, filename, data):\n raise NotImplementedError", "def saveCorpusFile(output_path, arr, format, features):\n def rowMap(x):\n if format == \"csv\":\n if features:\n x = x.split(\",\")[1]\n else:\n parts = x.split(\",\")\n parts.pop(0)\n x = \" \".join(parts)\n return x.replace(\",\", \" \")\n if format == \"tsv\":\n if features:\n x = x.split(\"\\t\")[1]\n else:\n parts = x.split(\"\\t\")\n parts.pop(0)\n x = \" \".join(parts)\n return x.replace(\"\\t\", \" \")\n\n arr_corpus = map(lambda x: rowMap(x), arr)\n with open(output_path, 'w+') as corpusfile:\n for row in arr_corpus:\n corpusfile.write(row + \"\\n\")", "def test_save_as_ascii(self):\n array = np.array([0, 1, 3])\n errors = np.array([1, 1, 1])\n hen.io.save_as_ascii(\n [array, errors],\n filename=os.path.join(self.datadir, \"monol_test.txt\"),\n colnames=[\"array\", \"err\"])", "def write_to_txt(batch, filepath, typ='vector', verbose ='dataset'):\n if typ == 'params':\n with open(filepath, \"w\") as txt:\n for param_pack in batch:\n txt.write(\"{} ; {} ; {} \\n\".format(param_pack.alphaL, \n 
param_pack.alphaR, param_pack.label))\n #txt.write(str(param_pack.alphaL) +\", \"+str(param_pack.alphaR) +\"\\n\")\n elif typ == 'vector':\n if verbose == 'human':\n with open(filepath, \"w\") as txt:\n for vector in batch:\n txt.write(str(vector[0]) + \"\\n\")\n txt.write(str(vector[1]) + \"\\n\")\n elif verbose == 'dataset':\n with open(filepath, \"w\") as txt:\n for vector in batch:\n #txt.write(str(vector[0].x) +\";\"+str(vector[0].y) +\";\"+ str(vector[0].angle) +\";\"+ str(vector[0].norm()) + \"\\n\")\n txt.write(str(vector[1].x) +\";\"+ str(vector[1].angle) +\";\"+ str(vector[1].norm()) + \"\\n\")", "def write_data(self, data, path):\n if self.data_format == 'twenty': \n length = 20\n else: raise ValueError(\"self.data_format = '%s' unknown.\" % \n self.data_format)\n if len(data.shape) == 1: data = data.reshape((data.shape[0],1))\n with open(path,'w') as f:\n for k in range(data.shape[0]):\n f.write(''.join(\n [str(data[k,l]).rjust(length) for l in range(data.shape[1])]\n ) + '\\n')", "def save_python_data(path, data):\n with codecs.open(path, encoding='UTF-8', mode='w') as fo:\n for d in data:\n fo.write(repr(d))\n fo.write('\\n')", "def write(file, text):\n with open(file, 'w') as f:\n f.write(text)", "def write_bin(file, binary, buffer=None, append=True):\n\n # Get current stream, default or not.\n stream = cp.cuda.get_current_stream()\n\n if buffer is None:\n buffer = cp.asnumpy(binary)\n else:\n binary.get(out=buffer)\n\n if append is True:\n mode = \"ab\"\n else:\n mode = \"wb\"\n\n with open(file, mode) as f:\n stream.synchronize()\n buffer.tofile(f)", "def write_object_file_to_file(self, file_name):\n with open(file_name, 'wb+') as file:\n file.write(self.object_file.to_binary_array())", "def save_to_array(arr_name, arr_object):\n return np.save(arr_name, arr_object)", "def _write_file(output_path: str, file_content: Iterable[str]) -> None:\n with open(output_path, \"w+\", encoding=\"utf-8\") as f:\n f.writelines(file_content)\n\n logging.info(f\"wrote to '{output_path}'\")", "def write_file(filename, contents):\n\n outfile = open(filename, 'w')\n\n if type(contents) == list:\n for line in contents:\n outfile.write(line)\n elif type(contents) == str:\n outfile.write(contents)\n else:\n raise \"Type for 'contents' not supported: \" + repr(type(contents))\n\n outfile.close()\n\n return", "def adapt_array(self,array):\n import io\n import array,numpy\n out = io.BytesIO()\n numpy.save(out, array)\n out.seek(0)\n \n return out.read()", "def write_data():", "def _write(self, spectra: List[Spectrum], spec_file: IO[AnyStr], as_bytes: bool):\n raise NotImplementedError(SpectrumWriter._write.__qualname__)", "def save_txt(filename, data, encoding):\n with open(filename, \"w\") as f:\n f.write(dump(data, encoding))", "def put_2Darray(file,array,header='',format='',append='no'):\n lista=[]\n for i in range(array.shape[1]):lista.append(array[:,i])\n lista=tuple(lista)\n put_data(file,lista,header,format,append)", "def write_numpy_array_as_wav(numpy_array, sample_rate, file_object):\n # type: (Any, int, IO) -> None\n try:\n import numpy\n from scipy.io.wavfile import write\n except ImportError:\n LOGGER.error(\n \"The Python libraries numpy, and scipy are required for this operation\"\n )\n return\n\n array_max = numpy.max(numpy.abs(numpy_array))\n\n scaled = numpy.int16(numpy_array / array_max * 32767)\n\n write(file_object, sample_rate, scaled)", "def write_data(self, array: np.ndarray, *, remote_operation: bool = False) -> bytes:\n checksum = xxh64_hexdigest(array)\n if self.w_uid in 
self.wFp:\n self.hIdx += 1\n if self.hIdx >= COLLECTION_SIZE:\n self.wFp[self.w_uid].flush()\n self._create_schema(remote_operation=remote_operation)\n else:\n self._create_schema(remote_operation=remote_operation)\n\n destSlc = (self.hIdx, *[slice(0, x) for x in array.shape])\n self.wFp[self.w_uid][destSlc] = array\n self.wFp[self.w_uid].flush()\n return numpy_10_encode(self.w_uid, checksum, self.hIdx, array.shape)", "def save(fname, data):\n from ..numpy import ndarray as np_ndarray\n if isinstance(data, NDArray):\n data = [data]\n handles = c_array(NDArrayHandle, [])\n if isinstance(data, dict):\n str_keys = data.keys()\n nd_vals = data.values()\n if any(not isinstance(k, string_types) for k in str_keys) or \\\n any(not isinstance(v, NDArray) for v in nd_vals):\n raise TypeError('save only accept dict str->NDArray or list of NDArray')\n if any(isinstance(v, np_ndarray) for v in nd_vals):\n raise TypeError('cannot save mxnet.numpy.ndarray using mxnet.ndarray.save;'\n ' use mxnet.numpy.save instead.')\n keys = c_str_array(str_keys)\n handles = c_handle_array(nd_vals)\n elif isinstance(data, list):\n if any(not isinstance(v, NDArray) for v in data):\n raise TypeError('save only accept dict str->NDArray or list of NDArray')\n if any(isinstance(v, np_ndarray) for v in data):\n raise TypeError('cannot save mxnet.numpy.ndarray using mxnet.ndarray.save;'\n ' use mxnet.numpy.save instead.')\n keys = None\n handles = c_handle_array(data)\n else:\n raise ValueError(\"data needs to either be a NDArray, dict of str, NDArray pairs \"\n \"or a list of NDarrays.\")\n check_call(_LIB.MXNDArraySave(c_str(fname),\n mx_uint(len(handles)),\n handles,\n keys))", "def writeToFile(outputFile, unicode_text):\n fp = outputFile\n # workaround problem if caller gives byte string instead\n unicode_text = safe_unicode(unicode_text)\n utf8_text = unicode_text.encode('utf-8')\n fp.write(utf8_text)\n #fp.close()", "def write(cls, vas):\n with open(Y, 'w') as f_i:\n for items in vas:\n f_i.write('%s ' % items)\n print(\"File written successfully. 
Check out \\\"output.txt\\\" file\")\n f_i.close()", "def write_raw_text(self, path='.'):\n cells = self.get_cells()\n arrays = []\n for cell in cells:\n arrays.append(cell.data)\n array = np.concatenate(arrays)\n fn = os.path.join(path, self.label + '.txt')\n fmt = []\n p = re.compile('(\\w)(\\d+)')\n for key, value in self.datatype:\n m = p.search(value)\n if m:\n kind, size = m.groups()\n # strings\n if kind == 'S':\n add = '%{}c'.format(size)\n # integers\n elif kind in ['u', 'i']:\n add = '%d'\n else:\n add = '%.8e'\n else:\n add = '%.8e'\n fmt.append(add)\n np.savetxt(fn, array, fmt=fmt, delimiter='\\t')\n return", "def write_array(stream, data):\n # Make Buffer\n buffer_size = struct.calcsize('@f') * len(data)\n output_buffer = ctypes.create_string_buffer(buffer_size)\n\n # Fill Up Buffer\n #struct needs @fffff, one f for each float\n dataformat = '@' + 'f'*len(data)\n struct.pack_into(dataformat, output_buffer, 0, *data)\n\n # Shove contents of buffer out audio port\n stream.write(output_buffer)", "def save_text_file(i):\n\n fn = i['text_file']\n\n s = i['string']\n\n try:\n s = s.replace('\\r', '')\n except Exception as e:\n pass\n\n try:\n s = s.replace(b'\\r', b'')\n except Exception as e:\n pass\n\n m = 'w'\n if i.get('append', '') == 'yes':\n m = 'a'\n\n try:\n s = s.encode('utf8')\n except Exception as e:\n pass\n\n try:\n # if sys.version_info[0]>2:\n # f=open(fn, m+'b')\n # f.write(s)\n # else:\n f = open(fn, m+'b')\n f.write(s)\n except Exception as e:\n return {'return': 1, 'error': 'problem writing text file='+fn+' ('+format(e)+')'}\n\n f.close()\n\n return {'return': 0}", "def save2file(lis, path):\r\n np.save(path, np.array(lis))", "def _exportDataToText(self, file, with_axis=None):\n if with_axis is not None:\n data = self._data_with_axis(with_axis)\n numpy.savetxt(file, data)\n else:\n numpy.savetxt(file, self.data)", "def export_to_file(data, filename='class_data.txt', mode='a'):\n with open (filename, mode) as f:\n if mode == \"w\":\n for record in data:\n line = \",\".join(record)\n f.write(line + \"\\n\")\n elif mode == \"a\":\n line = \",\".join(data)\n f.write(line + \"\\n\")\n else:\n raise ValueError('Wrong write mode')", "def write_int_array(f, path, values, dtype='<i4'):\n dset = f.create_dataset(path, (len(values),), dtype=dtype)\n dset[:] = values\n f.flush()", "def eeg_writeavr(array,tsb,di,file):\t\t\n import shutil as shu\n f=open(file,'w')\n firstline = 'Npts= %i TSB= %i DI= %7.5f SB= %7.5f SC= %i NChan= %i\\n' %(array.shape[1],tsb,di,1,200,array.shape[0]) \n chnam = 'Cz FP1 FP2 F3 F4 C3 C4 P3 P4 O1 O2 F7 F8 T7 T8 P7 P8 Fz Pz FC1 FC2 CP1 CP2 FC5 FC6 CP5 CP6 FT9 FT10 TP9 TP10 PO9 PO10\\n'\n f.write(firstline)\n f.write(chnam)\n for i in range(array.shape[0]):\n tmp = array[i,:]\n f.write(('%7.5f ' * len(tmp)) %tuple(tmp))\n f.write('\\n')\n \n f.close()\n #may want to change this on different machines...\n src = '/Users/crislanting/Projects/EEG/data/33.elp'\n dest = file[:-4] + '.elp'\n shu.copyfile(src,dest)", "def save(self, path_to_save):\n data_to_save = []\n for item in self.data_array:\n if isinstance(item, Result):\n data_to_save.append(item.get())\n\n np.savetxt(path_to_save+'.txt', data_to_save, fmt='%5s', delimiter=',')", "def write_file(_data, _label, _clinical, _contour, _type):\n pickle.dump(np.array(_data), open(_type + '_data.pxl', 'wb'))\n pickle.dump(np.array(_label), open(_type + '_label.pxl', 'wb'))\n pickle.dump(np.array(_clinical), open(_type + '_clinical.pxl', 'wb'))\n pickle.dump(np.array(_contour), open(_type + '_contour.pxl', 
'wb'))", "def write(self, outputFile):\n \n try: \n f = open(outputFile + '.py', 'w')\n for trail in self.trails: \n f.write(\"[\")\n for index in trail:\n f.write(\"({0}, {1}), \".format(*index)) \n f.write(\"]\\n\")\n \n except IOError, e:\n msg = \"Exception encountered when attempting \" + \\\n \"to write data to file: {0}.\" + \\\n \"\\n\\t -- Exception was: {1}\" + \\\n \"\\n\\t For help use --help\".format(outputFile, e)\n raise Usage(e)", "def save_data(self, f): \n if not self.sampling:\n self.convert_to_array()\n np.save(f, self.reads)", "def save_to_array(x, y):\n\n with open(settings.data(\"x.npy\"), \"wb\") as file:\n np.save(file, x)\n\n with open(settings.data(\"y.npy\"), \"wb\") as file:\n np.save(file, y)", "def save_array_as(arr, loc=None):\n if loc is None:\n root = tk.Tk()\n root.loc = filedialog.asksaveasfilename(initialdir=\"/\",\n title=\"Save as\",\n filetypes=((\"npy files\", \"*.npy\"),\n (\"all files\", \"*.*\"))\n )\n np.save(root.loc, arr)\n root.destroy()\n else:\n np.save(loc, arr)", "def write_to_binary_file(self, filename):\n\n self.octree.writeBinary(str.encode(filename))", "def write_to_file(\r\n self,\r\n path_to_file,\r\n dtype,\r\n driver=\"GTiff\",\r\n nodata=None,\r\n compress=None,\r\n kwargs=None,\r\n ):\r\n if type(dtype) is str and dtype == \"min\":\r\n dtype = rasterio.dtypes.get_minimum_dtype(self.__arr)\r\n\r\n profile = self.dataset.meta\r\n profile.update(\r\n {\r\n \"driver\": driver,\r\n \"height\": self.__arr.shape[-2],\r\n \"width\": self.__arr.shape[-1],\r\n \"dtype\": dtype,\r\n \"transform\": self.dataset.transform,\r\n \"crs\": self.dataset.crs,\r\n }\r\n )\r\n\r\n if nodata:\r\n profile.update({\"nodata\": nodata})\r\n\r\n if compress:\r\n profile.update({\"compress\": compress})\r\n\r\n if kwargs:\r\n profile.update(**kwargs)\r\n\r\n with rasterio.open(path_to_file, \"w\", **profile) as dst:\r\n dst.write(self.__arr.astype(dtype))", "def write(data, filename, mode='a'):\r\n f = open(filename, mode, encoding='utf8')\r\n f.write(data + '\\n')\r\n f.close()", "def write(self, writer: BitStreamWriter) -> None:\n\n size = len(self._rawArray)\n if self._isAuto:\n writer.writeVarSize(size)\n\n for index in range(size):\n if self._checkOffsetMethod is not None:\n writer.alignTo(8)\n self._checkOffsetMethod(index, writer.getBitPosition())\n self._arrayTraits.write(writer, self._rawArray[index])", "def serialize(self, data):\n if isinstance(data, np.ndarray):\n if data.size == 0:\n raise ValueError(\"Cannot serialize empty array.\")\n return self._serialize_array(data)\n\n if isinstance(data, list):\n if len(data) == 0:\n raise ValueError(\"Cannot serialize empty array.\")\n return self._serialize_array(np.array(data, self.dtype))\n\n # files and buffers. 
Assumed to hold npy-formatted data.\n if hasattr(data, \"read\"):\n return data.read()\n\n return self._serialize_array(np.array(data))", "def dataToJson(self,dataArray, filename):\n try:\n jsondata = simplejson.dumps(dataArray, indent=4, skipkeys=True, sort_keys=True)\n fd = open(\"./data/\" + filename, 'w')\n fd.write(jsondata)\n fd.close()\n print (\"data written to the file succesfully\")\n except:\n print ('ERROR writing', filename)\n pass", "def save_bin(words,data,fname):\n\n out=open(fname,\"wb\")\n\n rows,dims=data.shape\n out.write(\"{} {}\\n\".format(rows,dims).encode(\"utf-8\"))\n counter=0\n\n for i,w in enumerate(words):\n out.write(w.encode(\"utf-8\"))\n out.write(\" \".encode(\"utf-8\"))\n out.write(struct.pack(\"{}f\".format(dims),*data[i,:]))\n counter+=1\n \n out.close()\n print(\"Model saved to\",fname,file=sys.stderr)", "def write_data(f, data):\n\n f.write(bytes(data, \"UTF-8\"))", "def save_to_file(data, file):\n file_path = os.path.abspath(os.path.dirname(file) + os.path.sep + \".\")\n if not os.path.exists(file_path):\n os.makedirs(file_path)\n file_list = file.split(\".\")\n if isinstance(data, pd.DataFrame):\n file_list[-1] = \"csv\"\n data.to_csv(\".\".join(file_list), index=False)\n else:\n file_list[-1] = \"npz\"\n np.savetxt(\".\".join(file_list), data, delimiter=',')", "def write_to_file(file_name, content):\n with open(file_name, \"w\") as text_file:\n text_file.write(str(content))", "def array2chomp( arr, savename ):\n rows = map( lambda x: str(x)+'\\n', map( tuple, iter( arr ) ) ) \n with open( savename, 'w' ) as fh:\n fh.writelines( rows )", "def write_text(file: Union[str, bytes, int, PathLike],\n embeddings: Embeddings,\n sep=\" \"):\n _write_text(file, embeddings, False, sep=sep)", "def save_data(data, file_name):\r\n file = open(file_name, \"w\")\r\n file.write(data + \"\\n\")\r\n file.close()", "def write(self, fname):\n pass", "def output_file(data, filename):\n with open(filename + '.txt', 'w+') as f_out:\n for char in data:\n f_out.write('U+' + str(hex(ord(char)))[2:] + '\\n')", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self.tsp_turtles\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.conveyor_turtle\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.catch_turtle\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def write(self, binary_log: BinaryLoggable) -> None:\n if binary_log == None:\n return #possibly raise exception\n record_array = binary_log.to_bytes()\n record_len = len(record_array)\n if record_len == 0:\n return #possibly raise exception\n\n log_name = type(binary_log).__name__\n self.file_map.setdefault(log_name, [])\n\n # Writes log_name size and log_name to the end of file\n self.bfile.seek(0,os.SEEK_END)\n self.bfile.write(len(log_name).to_bytes(self.IntLength, byteorder='big'))\n self.bfile.write(bytearray(log_name, self.Encoding))\n\n # Write byte_array size and byte array\n 
self.bfile.write(record_len.to_bytes(self.IntLength, byteorder='big'))\n self.file_map[log_name].append([self.bfile.tell(),record_len])\n self.bfile.write(record_array)", "def serialize_numpy(self, buff, numpy):\n try:\n buff.write(_get_struct_i().pack(self.numberOfTSPTurtles))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))" ]
[ "0.7022295", "0.66680944", "0.6556973", "0.65567935", "0.6454246", "0.64344746", "0.6400705", "0.6375197", "0.6342715", "0.62929696", "0.6282288", "0.62203467", "0.61905587", "0.61702776", "0.611481", "0.61135733", "0.6070423", "0.6067515", "0.6062281", "0.60030717", "0.5993181", "0.5991136", "0.59821254", "0.5973174", "0.59561884", "0.59361315", "0.592615", "0.59243363", "0.59025913", "0.5900634", "0.5887614", "0.58587784", "0.58391756", "0.58293116", "0.58248085", "0.58208627", "0.5795812", "0.5784751", "0.5773738", "0.5770432", "0.57387376", "0.5710969", "0.5708288", "0.5702184", "0.56885076", "0.56560856", "0.56499815", "0.5648173", "0.56470966", "0.56428015", "0.56257707", "0.56252104", "0.56172097", "0.56145227", "0.56100446", "0.56011343", "0.5599361", "0.5597785", "0.5585811", "0.55785686", "0.5575003", "0.55745775", "0.556677", "0.5560988", "0.5554071", "0.55457556", "0.5540031", "0.5537018", "0.5535599", "0.5532738", "0.5529264", "0.55214703", "0.55178577", "0.55069435", "0.5503231", "0.54779357", "0.547454", "0.54646516", "0.545831", "0.5453722", "0.54375345", "0.54359806", "0.54336935", "0.54306763", "0.54263365", "0.53988254", "0.53975457", "0.53956705", "0.53615516", "0.5359456", "0.5357029", "0.5347693", "0.5345534", "0.5322899", "0.532176", "0.53213406", "0.5318359", "0.5313934", "0.5310345", "0.5309371" ]
0.6797532
1
Get the data from the dataset and apply preprocessing to extract hours.
def get_data(file_name): csv_file = open(file_name, 'rb') train_content = csv.reader(csv_file) # ignore header train_content.next() # preprocessing functions for each column index # Several preprocessing can be defined for each column. # A new variable is associated to EACH preprocessing function preproc_funcs = {0: ['get_hour']} # Read data from file, store it as an integer data = [] for row in train_content: data_row = [] for n, col in enumerate(row): # if the current column requires preprocessing functions, apply them if preproc_funcs.has_key(n): # Each preprocessing give a new column for preproc_func in preproc_funcs[n]: func = globals().get(preproc_func) data_row.append(int(float(func(col)))) # If no preprocessing, do nothing else: data_row.append(int(float(col))) data.append(data_row) csv_file.close() return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_preprocessing(dataset):\r\n df = pd.read_csv(dataset)\r\n df.head()\r\n df.describe()\r\n df.isnull().sum()\r\n df= df.drop(['instant'], axis=1)\r\n df['dteday'] = pd.to_datetime(df['dteday'].apply(str) + ' ' + df['hr'].apply(str) + ':00:00')\r\n return df", "def getHours(data: Sequence[HistoryElement]) -> Sequence[int]:\r\n _checkData(data)\r\n return [x.timeStamp.hour for x in data]", "def load_data(city, month, day):\n \n# Using pandas accessor to find month, day, hour from the Start Time column in the source data\n print(\"A moment please while I find the data....\\n\")\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['month'] = df['Start Time'].dt.month\n df['hour'] = df['Start Time'].dt.hour\n\n return df", "def preprocess(data):\n\tprint(\"\\n--------Data preview--------\\n{0}\".format(data.head()))\n\t# transform datatime columns to four columns includes the year、month、day、hour\n\tdata['year'] = pd.DatetimeIndex(data['datetime']).year\n\tdata['month'] = pd.DatetimeIndex(data['datetime']).month\n\tdata['day'] = pd.DatetimeIndex(data['datetime']).day\n\tdata['hour'] = pd.DatetimeIndex(data['datetime']).hour\n\n\tdata[\"date\"] = data.datetime.apply(lambda x : x.split()[0])\n\tdata[\"weekday\"] = data.date.apply(lambda dateString :\n\t calendar.day_name[datetime.strptime(dateString,\"%Y-%m-%d\").weekday()])\n\t# after transformed delete the 'datatime' column\n\tdataDroped = data.drop(['datetime'], axis=1)\n\tprint(\"\\n-------\\nAfter preprocess(transform time display format to avoid object data type)\\n-------\")\n\treturn dataDroped", "def load_data(city, time_filter, month, day):\n print(\"\\nLoading data and preprocessing...\")\n start_time = time.time()\n df = pd.read_csv(CITY_DATA[city])\n\n #Create columns for the dataframe for convinience in calculations\n df['start_year'] = [int(value[0:4]) for value in df[\"Start Time\"]]\n df['start_month'] = [int(value[5:7]) for value in df[\"Start Time\"]]\n df['start_day'] = [int(value[8:10]) for value in df[\"Start Time\"]]\n df['start_hour'] = [int(value[11:13]) for value in df[\"Start Time\"]]\n df['start_min'] = [int(value[14:16]) for value in df[\"Start Time\"]]\n df['start_sec'] = [int(value[17:20]) for value in df[\"Start Time\"]]\n\n df['end_year'] = [int(value[0:4]) for value in df[\"End Time\"]]\n df['end_month'] = [int(value[5:7]) for value in df[\"End Time\"]]\n df['end_day'] = [int(value[8:10]) for value in df[\"End Time\"]]\n df['end_hour'] = [int(value[11:13]) for value in df[\"End Time\"]]\n df['end_min'] = [int(value[14:16]) for value in df[\"End Time\"]]\n df['end_sec'] = [int(value[17:20]) for value in df[\"End Time\"]]\n\n #Create a new column for the dataframe containing the day of the week\n day_of_week = []\n for index, row in df.iterrows():\n day_of_week.append( dt.date(row[\"start_year\"], row[\"start_month\"], row[\"start_day\"]).weekday())\n df['day_of_week'] = day_of_week\n\n #Apply the required filters and return the dataframe\n if time_filter == 'none':\n load_data_print(start_time)\n return df\n elif time_filter == 'month':\n df = df[(df['start_month']==MONTH[month])]\n load_data_print(start_time)\n return df\n elif time_filter == 'day':\n df = df[(df['day_of_week']==WEEKDAY[day])]\n load_data_print(start_time)\n return df\n else:\n print(\"\\nERROR on viable_unput_time value\")\n return 1", "def get_data_by_time(filename):\n with open(filename, 'r') as f_in:\n # set up csv reader object\n reader 
= csv.DictReader(f_in)\n result = {}\n result['n_week'] = [0] * 7\n result['d_week'] = [0] * 7\n result['cus_hour'] = [0] * 24\n result['sub_hour'] = [0] * 24\n for data in reader:\n duration = float(data['duration'])\n if data['day_of_week'] == 'Sunday':\n result['n_week'][0] += 1\n result['d_week'][0] += duration\n elif data['day_of_week'] == 'Monday':\n result['n_week'][1] += 1\n result['d_week'][1] += duration\n elif data['day_of_week'] == 'Tuesday':\n result['n_week'][2] += 1\n result['d_week'][2] += duration\n elif data['day_of_week'] == 'Wednesday':\n result['n_week'][3] += 1\n result['d_week'][3] += duration\n elif data['day_of_week'] == 'Thursday':\n result['n_week'][4] += 1\n result['d_week'][4] += duration\n elif data['day_of_week'] == 'Friday':\n result['n_week'][5] += 1\n result['d_week'][5] += duration\n else:\n result['n_week'][6] += 1\n result['d_week'][6] += duration\n\n hour = int(data['hour'])\n if data['user_type'] == 'Customer':\n result['cus_hour'][hour] += 1\n else:\n result['sub_hour'][hour] += 1\n return result", "def getHourlyLoads(self):\n\n\t\tloads_data = self.getDataForLoadComparisons()\n\t\tload_values = [] # Array that will contain all the load data\n\t\tload_data = {} # Dictionary of load data\n\t\thour = 0 # Counter that determines the 24 hours in a day\n\n\t\t# Parsing load data\n\t\ttoday = self.helper.getMonth() + \"/\" + self.helper.getDay() + \"/\" + self.helper.getYear()\n\t\tfor data in loads_data[0]['values']:\t\t\t\n\t\t\tif data[\"label\"] == \"12:00 AM\":\n\t\t\t\tdata[\"label\"] = \" 00:00\"\n\t\t\telif data[\"label\"].split(\" \")[1] == \"AM\":\n\n\t\t\t\thour = int(data[\"label\"].split(\":\")[0])\n\t\t\t\tif hour < 10:\n\t\t\t\t\tdata[\"label\"] = \" 0\" + str(hour) + \":00\"\n\t\t\t\telse:\n\t\t\t\t\tdata[\"label\"] = str(hour) + \":00\"\n\t\t\telif data[\"label\"].split(\" \")[1] == \"PM\":\n\t\t\t\tif data[\"label\"] == \"12:00 PM\":\n\t\t\t\t\tdata[\"label\"] = \" 12:00\"\n\t\t\t\telse:\n\t\t\t\t\thour = int(data[\"label\"].split(\":\")[0])\n\t\t\t\t\thour += 12\n\t\t\t\t\tdata[\"label\"] = \" \" + str(hour) + \":00\"\n\t\t\tload_data[\"x\"] = self.helper.getDateInEpoch(today + \" \" + data[\"label\"])\n\t\t\tload_data[\"y\"] = float(data[\"value\"])\n\t\t\tload_values.append(load_data)\n\t\t\tload_data = {}\n\n\t\treturn load_values", "async def fetch_hourly_data(self, day=None):\n self._logger.info(\"Fetching hourly data for %s\", day)\n await self._client.select_customer(self.account_id, self.customer_id)\n await self._client.select_customer(self.account_id, self.customer_id)\n\n if day is None:\n # Get yesterday\n yesterday = datetime.now() - timedelta(days=1)\n day_str = yesterday.strftime(\"%Y-%m-%d\")\n elif hasattr(day, \"strftime\"):\n day_str = day.strftime(\"%Y-%m-%d\")\n else:\n try:\n datetime.strptime(day, \"%Y-%m-%d\")\n except ValueError:\n print(\"Start date bad format. 
It must match %Y-%m-%d\")\n return\n day_str = day\n\n params = {\"dateDebut\": day_str, \"dateFin\": day_str}\n res = await self._client.http_request(HOURLY_DATA_URL_2, \"get\",\n params=params, )\n # We can not use res.json() because the response header are not application/json\n json_res = json.loads(await res.text())\n\n if len(json_res.get('results')) == 0:\n self._hourly_data[day_str] = {\n 'day_mean_temp': None,\n 'day_min_temp': None,\n 'day_max_temp': None,\n 'hours': {},\n }\n tmp_hour_dict = dict((h, {'average_temperature':None}) for h in range(24))\n else:\n self._hourly_data[day_str] = {\n 'day_mean_temp': json_res['results'][0]['tempMoyJour'],\n 'day_min_temp': json_res['results'][0]['tempMinJour'],\n 'day_max_temp': json_res['results'][0]['tempMaxJour'],\n 'hours': {},\n }\n tmp_hour_dict = dict((h, {}) for h in range(24))\n for hour, temp in enumerate(json_res['results'][0]['listeTemperaturesHeure']):\n tmp_hour_dict[hour]['average_temperature'] = temp\n\n raw_hourly_weather_data = []\n if len(json_res.get('results')) == 0:\n # Missing Temperature data from Hydro-Quebec (but don't crash the app for that)\n raw_hourly_weather_data = [None]*24\n else:\n raw_hourly_weather_data = json_res['results'][0]['listeTemperaturesHeure']\n\n params = {\"date\": day_str}\n res = await self._client.http_request(HOURLY_DATA_URL_1, \"get\", params=params)\n # We can not use res.json() because the response header are not application/json\n json_res = json.loads(await res.text())\n for hour, data in enumerate(json_res['results']['listeDonneesConsoEnergieHoraire']):\n tmp_hour_dict[hour]['lower_price_consumption'] = data['consoReg']\n tmp_hour_dict[hour]['higher_price_consumption'] = data['consoHaut']\n tmp_hour_dict[hour]['total_consumption'] = data['consoTotal']\n self._hourly_data[day_str]['hours'] = tmp_hour_dict.copy()\n\n #Also copy the raw hourly data from hydroquebec (This can be used later for commercial accounts, mostly 15 minutes power data)\n self._hourly_data_raw[day_str] = {\n 'Energy': json_res['results']['listeDonneesConsoEnergieHoraire'],\n 'Power': json_res['results']['listeDonneesConsoPuissanceHoraire'],\n 'Weather': raw_hourly_weather_data\n }", "def hourly_data():\n return generate_df_for_tests(freq=\"H\", periods=24 * 500)", "def load_data(city, month, day):\n\n if city == 'all':\n df = pd.read_csv(CITY_DATA['chicago'])\n df['city'] = 'chicago'\n ny = pd.read_csv(CITY_DATA['new york city'])\n ny['city'] = 'new york city'\n df = df.append(ny,sort = True)\n wa = pd.read_csv(CITY_DATA['washington'])\n wa['city'] = 'washington'\n df = df.append(wa,sort = True)\n else:\n df = pd.read_csv(CITY_DATA[city])\n df['city'] = CITY_DATA[city]\n\n df['Start Time'] = pd.to_datetime(df['Start Time']) #converts Start Time to datetime\n df['End Time'] = pd.to_datetime(df['End Time']) #converts End Time to datetime\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n #print(df) #DataFrame\n\n #filter by month\n if month != 'all':\n month_name = ['january', 'february', 'march', 'april', 'may', 'june','july','august','september','october','november','december']\n month_num = month_name.index(month) + 1\n df = df[df['month'] == month_num]\n #filter by day\n if day != 'all':\n df = df[df['day_of_week'] == day.title()]\n\n recs = df['Start Time'].count()\n\n return df, recs", "def _check_hour_data(self, ls_row):\n for attr in ['kt_re', 'kt_im', 'div', 'type', 'area_code']:\n self._check_datum(attr, 
getattr(ls_row, attr))", "def getHourlyPrecip(self, keyword):\n\t\tweather_data = self.getHourlyWeatherFromCSV(keyword, \"f\", \"precipitation\")\n\t\tprecip_values = [] # Array that will contain all the precipitation data\n\t\tprecip_data = {} # Dictionary of precipitation data\n\n\t\t# Getting precipiation data\n\t\tfor data in weather_data:\n\t\t\tprecip_data[\"x\"] = self.helper.getDateInEpoch(data[\"date\"])\n\t\t\tprecip_data[\"y\"] = float(data[\"precipitation\"][:-1])/100\n\t\t\tprecip_values.append(precip_data)\n\t\t\tprecip_data = {}\n\n\t\treturn precip_values", "def _load_data(self, datasource):\n import pandas as pd\n if not isinstance(datasource, pd.DataFrame):\n raise TypeError('DfFeature must loaded from pd.DataFrame')\n self.data = datasource\n self.data['thetime']=self.data['thetime'].apply(lambda x:try_to_parse_date(x))", "def get_data(self):\n data = list(IgnitionRow.objects.all().order_by('-pub_date')[:self.num_ticks].values())\n two_hours = data[::-1] # The most recent two hours of data\n# print([elem['avg_pot_5'] for elem in two_hours])\n# avg_pot_data = [[float(elem['avg_pot_{}'.format(key)]) / (int(key) / 100) for elem in two_hours]\n avg_pot_data = [[float(elem['avg_pot_{}'.format(key)]) for elem in two_hours] \n \tfor key in self.keys]\n# print(avg_pot_data[0][-5:])\n avg_pot_data = [[max(min(elem, 100),0) for elem in arr] for arr in avg_pot_data] # Assume a max pot size of 2000 BBs\n avg_pot_data = [[elem if elem != 100 else 0 for elem in arr] for arr in avg_pot_data] # Assume a max pot size of 2000 BBs\n# print(avg_pot_data[0][-5:])\n return avg_pot_data", "def hourly_data(self):\n return self._hourly_data", "def process_data(self, df_data, breakdown='weekly'):\n df_data['time'] = df_data['time'].apply(self.remove_time_zone)\n df_data['grid_coord'] = df_data['grid_coord'].astype(str)\n # return self.process_chunk((self.remove_time_zone('2019-04-15T00:00:00-04:00'), self.remove_time_zone('2019-04-16T00:00:00-04:00')), df_data)\n # get weekly/daily time chunks within cleanedInputData\n week_days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']\n start = min(df_data['time']) #str\n end = max(df_data['time']) #str\n start_date = iso8601.parse_date(start).replace(hour=0, minute=0, second=0)\n end_date = (iso8601.parse_date(end) + timedelta(days=1)).replace(hour=0, minute=0, second=0)\n if breakdown == \"weekly\":\n dates = pd.date_range(start_date, end_date, freq='W-'+week_days[start_date.weekday()])\n dates = [e.isoformat() for e in dates] + [end_date.isoformat()]\n else: # breakdown == \"daily\"\n dates = pd.date_range(start_date, end_date, freq='d')\n dates = [e.isoformat() for e in dates]\n time_chunks = []\n for left, right in zip(dates, dates[1:]):\n time_chunks.append((left, right))\n # return self.process_chunk(time_chunks[0], df_data)\n # parallelize processing between time chunks\n with Pool(cpu_count()) as p:\n ret_list = p.map(partial(self.process_chunk, df_data=df_data), time_chunks)\n return pd.concat(ret_list)", "def _get_hours_pro_entry(time_entries):\n events = []\n for event in time_entries:\n start_time = datetime.datetime(\n date.today().year,\n date.today().month,\n date.today().day,\n event.start_at.hour,\n event.start_at.minute,\n event.start_at.second,\n )\n end_time = datetime.datetime(\n date.today().year,\n date.today().month,\n date.today().day,\n event.finish_at.hour,\n event.finish_at.minute,\n event.finish_at.second,\n )\n\n timediff = end_time - start_time\n events.append(\n {\n \"worked_hours\": round(timediff.total_seconds() / 
3600, DECIMALS_HOUR),\n \"event\": event,\n }\n )\n return events", "def get_hours(self, date = \"\"):\n\n if date == \"\":\n DATE = datetime.today()\n else:\n year, month, day = date.split('-')\n DATE = datetime(int(year), int(month), int(day))\n\n s = requests.get(\"https://api.wdpro.disney.go.com/facility-service/schedules/{}?date={}-{}-{}\".format(self.__id, DATE.year, self.__formatDate(str(DATE.month)), self.__formatDate(str(DATE.day))), headers=getHeaders())\n data = json.loads(s.content)\n\n operating_hours_start = None\n operating_hours_end = None\n extra_hours_start = None\n extra_hours_end = None\n\n try:\n for i in range(len(data['schedules'])):\n if data['schedules'][i]['type'] == 'Operating':\n operating_hours_start = datetime(DATE.year, DATE.month, DATE.day, int(data['schedules'][i]['startTime'][0:2]), int(data['schedules'][i]['startTime'][3:5]))\n if int(data['schedules'][i]['endTime'][0:2]) >= 0 and int(data['schedules'][i]['endTime'][0:2]) <= 7:\n DATETEMP = DATE + timedelta(days=1)\n operating_hours_end = datetime(DATETEMP.year, DATETEMP.month, DATETEMP.day, int(data['schedules'][i]['endTime'][0:2]), int(data['schedules'][i]['endTime'][3:5]))\n else:\n operating_hours_end = datetime(DATE.year, DATE.month, DATE.day, int(data['schedules'][i]['endTime'][0:2]), int(data['schedules'][i]['endTime'][3:5]))\n\n if data['schedules'][i]['type'] == \"Special Ticketed Event\":\n extra_hours_start = datetime(DATE.year, DATE.month, DATE.day, int(data['schedules'][i]['startTime'][0:2]), int(data['schedules'][i]['startTime'][3:5]))\n if int(data['schedules'][i]['endTime'][0:2]) >= 0 and int(data['schedules'][i]['endTime'][0:2]) <= 7:\n DATETEMP = DATE + timedelta(days=1)\n extra_hours_end = datetime(DATETEMP.year, DATETEMP.month, DATETEMP.day, int(data['schedules'][i]['endTime'][0:2]), int(data['schedules'][i]['endTime'][3:5]))\n else:\n operating_hours_end = datetime(DATE.year, DATE.month, DATE.day, int(data['schedules'][i]['endTime'][0:2]), int(data['schedules'][i]['endTime'][3:5]))\n\n except KeyError:\n pass\n return operating_hours_start, operating_hours_end, extra_hours_start, extra_hours_end", "def get_hourly_weather_details(self, hours: int = None):\n if hours is None:\n hours = 11\n forecast = super().get_weather_forecast(self.BASE_URL)\n headers = [\"date_time\",\n \"temp\",\n \"real_feel_temp\",\n \"wind_speed\",\n \"rain_probability\",\n \"cloud_cover\",\n ]\n for number in range(hours):\n data = []\n date_time = forecast[number]['DateTime']\n date_time = date_time[:16]\n date_time = date_time.replace('T', ' ')\n data.append(date_time)\n temp = round((int(\n forecast[number][\"Temperature\"][\"Value\"]) - 32) / 1.8)\n data.append(temp)\n real_feel_temp = round((int(\n forecast[number][\"RealFeelTemperature\"][\"Value\"]) - 32) / 1.8)\n data.append(real_feel_temp)\n wind_speed = forecast[number][\"Wind\"][\"Speed\"][\"Value\"]\n data.append(wind_speed)\n rain_probability = forecast[number][\"RainProbability\"]\n data.append(rain_probability)\n cloud_cover = forecast[number][\"CloudCover\"]\n data.append(cloud_cover)\n yield dict(zip(headers, data))", "def load_data(city, month, day):\n data_to_use = CITY_DATA[city]\n df = pd.read_csv(data_to_use)\n # drop rows containing NAN fields\n df2 = df.dropna()\n\n # Ensure the Start and End Time are Date\n pd.to_datetime(df2['Start Time'])\n pd.to_datetime(df2['End Time'])\n df = df2.sort_values(by='Start Time')\n\n # For each Start Time create additional columns to store year, month, day_of_week and hour\n # df['Start Year'] = 
df['Start Time'].apply(lambda x: get_part_of_datetime(x, 'year'))\n df['Start Month'] = df['Start Time'].apply(lambda x: get_part_of_datetime(x, 'month'))\n df['Start Day'] = df['Start Time'].apply(lambda x: get_part_of_datetime(x, 'day_of_week'))\n df['Start Hour'] = df['Start Time'].apply(lambda x: get_part_of_datetime(x, 'hour'))\n\n # filter month if month is not all\n if month.title() != 'All':\n df = df.loc[df['Start Month'] == month.title()]\n\n # filter day if day is not all\n if day.title() != 'All':\n df = df.loc[df['Start Day'] == day.title()]\n\n return df", "def load_data(city, month, day):\n# This code is refrenced from the practice problem on the project.\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week and hour from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n month = MONTHS.index(month) + 1\n df = df[ df['month'] == month ]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[ df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n \n start_time = time.time()\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n df['Start Time'] = pd.to_datetime(df['Start Time'], errors='coerce')\n\n # extract month, day of week and hour from Start Time to create new columns\n \n # Months will take values from 1 through 12\n df['month'] = df['Start Time'].dt.month \n \n # day of the week will take values in the range of 1 through 7\n df['day_of_week'] = df['Start Time'].dt.dayofweek \n \n # hour will take values from 0 through 23\n df['hour'] = df['Start Time'].dt.hour # range (0-23)\n\n # Here, we are filtering by month\n df['End Time'] = pd.to_datetime(df['End Time'])\n if month != 'all':\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1 \n\n df = df[df['Start Time'].dt.month == month]\n\n # Here, we are filtering by day of week\n if day != 'all': \n df = df[df['Start Time'].dt.weekday_name == day.title()]\n \n return df", "def ec_data_processor_precip(path, x='TIMESTAMP_END', y='LE', daily=True):\n\n\n # Get the data from the path and turn the path into a data frame\n # ec_dataset = pd.read_csv(path, header=2)\n\n ec_dataset = pd.read_csv(path, header=2, engine='python')\n\n # print ec_dataset.head()\n print ec_dataset['LE'].head()\n print ec_dataset[ec_dataset[y] != -9999].head()\n # === get rid of no data values in any category of the energy balance ===\n precip_dataset = ec_dataset[ec_dataset['P'] != -9999]\n ec_dataset = ec_dataset[ec_dataset[y] != -9999]\n ec_dataset = ec_dataset[ec_dataset['NETRAD'] != -9999]\n ec_dataset = ec_dataset[ec_dataset['H'] != -9999]\n ec_dataset = ec_dataset[ec_dataset['LE'] != -9999]\n # # You probably won't need these because Marcy Doesn't think they are valid for her towers\n # ec_dataset = ec_dataset[ec_dataset['SH'] != -9999]\n # ec_dataset = ec_dataset[ec_dataset['SLE'] != -9999]\n\n if x.startswith(\"TIMESTAMP\"):\n a = ec_dataset[x].apply(lambda b: dt.strptime(str(b), '%Y%m%d%H%M'))\n aa = precip_dataset[x].apply(lambda d: dt.strptime(str(d), '%Y%m%d%H%M'))\n\n # # TODO - if converting PRISM to MTN time.\n # # Convert to PRISM time 
(Mtn Standard + 5 hours) PRISM midnight is 12:00 UTC - 7 hours for mountain. Net +5 hrs\n # a = [i + timedelta(hours=19) for i in a]\n # aa = [i + timedelta(hours=19) for i in aa]\n\n\n else:\n a = ec_dataset[x]\n\n # ===== Time Series Processing =====\n\n timeseries = a\n p_timeseries = aa\n # print 'timeseries\\n', timeseries\n Rn = ec_dataset['NETRAD'].values\n H = ec_dataset['H'].values\n LE = ec_dataset['LE'].values\n P = precip_dataset['P']\n print 'P \\n', P\n # indexed_datetimes = pd.DataFrame(pd.DatetimeIndex(timeseries))\n\n # # testing\n # plt.plot(timeseries, P, color='black')\n # plt.show()\n\n # recreate a dataframe of the variables you want to time average on a monthly timestep\n halfhour_data = pd.DataFrame({'timeseries': timeseries, 'Rn': Rn, 'LE': LE, 'H': H}) # took out precip. no good vals? 'P': P\n\n halfhour_precip = pd.DataFrame({'timeseries': p_timeseries, 'P': P})\n # set the timeseries column to the index so groupby function can group by year and month of the index.\n halfhour_data = halfhour_data.set_index(pd.DatetimeIndex(halfhour_data['timeseries']))\n halfhour_precip = halfhour_precip.set_index(pd.DatetimeIndex(halfhour_precip['timeseries']))\n # convert latent heat to mmH2O by dividing by latent heat of vaporization.\n halfhour_data['mmh20'] = halfhour_data['LE'] * 7.962e-4\n\n if daily:\n\n daily_cum_data = halfhour_data.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()\n daily_cum_precip = halfhour_precip.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()\n\n # get each day in the timeseries. there are duplicates from the groupby function, so use set() to get rid of\n # duplicates\n daily_cum_time = daily_time_parse(timeseries)\n daily_cum_precip_time = daily_time_parse(p_timeseries)\n\n # # testing\n # daily_cum_data.to_csv('/Users/dcadol/Desktop/daily_cumulative_df.csv')\n\n # format daily_cum_data to have datetimes\n daily_cum_data['date'] = daily_cum_time\n daily_cum_precip['date'] = daily_cum_precip_time\n\n return daily_cum_data, daily_cum_precip", "def prep_data():\n # read data\n df = pd.read_csv(\"data/SF-crime-data_2016.csv\")\n\n # convert to datetime and extract hour\n df['datetime'] = pd.to_datetime(df[[\"Date\",\"Time\"]].apply(lambda x: x[0].split()[0] +\" \"+x[1], axis=1), format=\"%m/%d/%Y %H:%M\")\n df['hour'] = df['datetime'].dt.hour \n df.dropna(inplace=True)\n \n # filter out top 4 crimes\n top_4_crimes = df['Category'].value_counts()[:6].index.to_list()\n top_4_crimes\n top_4_crimes.remove(\"NON-CRIMINAL\")\n top_4_crimes.remove(\"OTHER OFFENSES\")\n\n return df[df[\"Category\"].isin(top_4_crimes)]", "def get_place_hours() -> (DataFrame, List[int]):\n df = pd.read_csv(\"data/chefmozhours4.csv\", encoding='utf-8')\n df.placeID = df.placeID.astype(int)\n df.drop_duplicates(subset=df.columns, keep='first', inplace=True)\n\n # create new dataframe\n columns = ['placeID', 'hours_week', 'hours_sat', 'hours_sun']\n new_df = DataFrame(columns=['placeID', 'hours_week', 'hours_sat', 'hours_sun'])\n new_df['placeID'] = new_df['placeID'].astype(int)\n last_place_id = None\n\n for row in df.itertuples():\n place_id = row[1]\n days = row[-1]\n total_time = timedelta()\n\n # extract time intervals\n intervals = row[2].split(';')\n intervals = list(filter(lambda x: len(x) > 0, intervals))\n\n # new placeID encountered\n if place_id != last_place_id:\n # append empty row to dataframe (placeID and empty hours)\n new_row = [place_id] + [0.] 
* (len(columns) - 1)\n new_df = new_df.append(pd.DataFrame([new_row], columns=columns), ignore_index=True)\n last_place_id = place_id\n\n # iterate over all opening times in one day\n for interval in intervals:\n try:\n start, stop = extract_time(interval)\n except AttributeError:\n continue\n\n # sum all time intervals in day\n total_time += stop - start\n\n if 'Mon' in days:\n column = 'hours_week'\n\n # time is specified for all days once possibly using more intervals\n if len(intervals) < 5:\n total_time *= 5\n\n total_time = total_time.total_seconds() / (seconds_in_day * 5)\n elif 'Sat' in days:\n column = 'hours_sat'\n total_time = total_time.total_seconds() / seconds_in_day\n elif 'Sun' in days:\n column = 'hours_sun'\n total_time = total_time.total_seconds() / seconds_in_day\n else:\n raise Exception(\"Row in table with wrong day!\")\n\n new_df.at[new_df.index.max(), column] = total_time\n\n error_rows = new_df.loc[\n (new_df['hours_week'] > 1.1)\n | (new_df['hours_sat'] > 1.1)\n | (new_df['hours_sun'] > 1.1)\n ]\n error_place_ids = error_rows.placeID.unique()\n\n # remove lines with errors\n new_df = new_df[~new_df.placeID.isin(error_place_ids)]\n\n return new_df", "def getHourlyWeatherFromCSV(self,town,scale,key):\n\n\t\t# Variables\n\t\tfile = \"data/weather/\"+town+\"_\"+scale+\".csv\"\n\t\tcsv_data = []\n\t\tweather_data = []\n\t\tweather = {}\n\n\t\t# Reading csv file and storing data in file\n\t\twith open(file) as csvfile:\n\t\t\treader = csv.DictReader(csvfile)\n\t\t\tfor row in reader:\n\t\t\t\tcsv_data.append(row) \n\t\t# Getting data that is needed for visualization\n\n\t\tprint csv_data\n\n\t\tfor data in csv_data:\n\t\t\t# Parsing date\n\t\t\thour = int(data[\"date\"].split(\" \")[4].split(\":\")[0])\n\t\t\tpm_or_am = data[\"date\"].split(\" \")[5]\n\t\t\tday = data[\"date\"].split(\",\")[0]\n\t\t\tif hour == 12 and pm_or_am == \"AM\":\n\t\t\t\tdata[\"date\"] = \"\".join(data[\"date\"].split(\" \")[:-2]) + \" 00:00\"\n\t\t\telif hour < 10 and pm_or_am == \"AM\":\n\t\t\t\tdata[\"date\"] = \"\".join(data[\"date\"].split(\" \")[:-2]) + \" 0\" + str(hour) + \":00\"\n\t\t\telif hour >= 10 and pm_or_am == \"AM\":\n\t\t\t\tdata[\"date\"] = \"\".join(data[\"date\"].split(\" \")[:-2]) + \" \" + str(hour) + \":00\"\n\t\t\tif pm_or_am == \"PM\":\n\t\t\t\tif hour == 12: \n\t\t\t\t\tdata[\"date\"] = \"\".join(data[\"date\"].split(\" \")[:-2]) + \" \" + str(hour) + \":00\"\n\t\t\t\telse:\n\t\t\t\t\thour +=12\n\t\t\t\t\tdata[\"date\"] = \"\".join(data[\"date\"].split(\" \")[:-2]) + \" \" + str(hour) + \":00\"\n\t\t\tweather[\"date\"] = data[\"date\"]\n\n\t\t\t# Appending weather data\n\t\t\tweather[key] = data[key]\n\t\t\tweather_data.append(weather)\n\t\t\tweather = {}\n\t\treturn weather_data", "def load_data(city_input, month_input, day_input):\n # Read csv for city_input using CITY_DATA dictionary to create df\n df = pd.read_csv(CITY_DATA[city_input])\n\n # Convert 'Start Time' and 'End Time' columns in df to datetime with pd.to_datetime function\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n\n # Include month number in df using dt.month\n df['Start Month'] = df['Start Time'].dt.month\n\n # Include weekday in df using dt.weekday_name - note its format, e.g. 
Monday\n df['Start Day'] = df['Start Time'].dt.weekday_name\n\n # Include hour in df using dt.hour\n df['Start Hour'] = df['Start Time'].dt.hour\n\n ## Month\n if month_input != 'all':\n # Create a list of months based on months indices using .index(element)\n MONTHS = ['january', 'february', 'march', 'april', 'may', 'june']\n # Python uses 0 indexing so we need to increase the values by 1 to correspond with month numbers\n month = MONTHS.index(month_input) + 1\n # Filter by month to create the new dataframe\n df = df[df['Start Month'] == month] # where month is the indexed version of the user input\n\n ## Day\n # Reformat day_input to Friday, for example\n day = day_input.title()\n\n if day != 'All':\n # Create a list of days\n DAYS = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday', 'Sunday', 'All']\n # Filter by day of week to create the new dataframe\n if day != 'All':\n df = df[df['Start Day'] == day]\n\n # Replace 'Trip Duration' with calculated version\n # This felt simpler than converting the number of seconds into days, hours, minutes, seconds ;)\n df['Trip Duration'] = df['End Time'] - df['Start Time']\n\n # print(df.head(20))\n return df", "def process_data(self, data):\n parsed = json.loads(data)\n current = parsed[\"current\"]\n future = parsed[\"hourly\"]\n current_res = self.get_columns(current)\n self.database_conn(\"current\", -1, current_res)\n for hour, data in enumerate(future):\n current_res = self.get_columns(data)\n self.database_conn(\"current\", hour, current_res)", "def get_hour_data(self, hour, param):\n\n try:\n hour_data = self.data[hour][param]\n return hour_data\n\n except KeyError:\n return np.full(self.lats.shape, np.nan)", "def _load_data(self):\n if self._api_response.status_code == 200:\n self._dataset = self._api_response.json()\n self._fill_day_dicts()", "def load_data(city, month, day):\n\n data = pd.read_csv(\"{}.csv\".format(city))\n data.drop(data.columns[0], axis = 1, inplace = True) #dropping this strange column\n\n data['Start Time'] = pd.to_datetime(data['Start Time'], format='%Y-%m-%d %H:%M:%S')\n data['End Time'] = pd.to_datetime(data['End Time'], format='%Y-%m-%d %H:%M:%S')\n\n data['weekday'] = data['Start Time'].dt.dayofweek #0 - monday\n data['month'] = data['Start Time'].dt.month #1 - january\n data['hour'] = data['Start Time'].dt.hour # 1 - hour 1\n\n day_dict = {\"Mon\":0, \"Tue\":1, \"Wed\":2, \"Thu\":3, \"Fry\":4, \"Sat\":5, \"Sun\":6}\n\n month_dict = {\"Jan\":1, \"Feb\":2, \"Mar\":3, \"Apr\":4, \"May\":5, \"Jun\":6}\n\n if month == 'all_months' and day != 'all_days': # filter just by day\n day = day_dict.get(day)\n df = data[data['weekday'] == day]\n elif day == 'all_days' and month != 'all_months': # filter just by month\n month = month_dict.get(month)\n df = data[data['month'] == month]\n elif day == 'all_days' and month == 'all_months': # no filters\n df = data\n else: # filter both by day and month\n day = day_dict.get(day)\n month = month_dict.get(month)\n df = data[(data['weekday']== day) & (data['month']==month)]\n return df", "def add_hour_data(self, hour, data):\n\n # Add data to self.data dictionary with the forecast hour as the key\n self.data[hour] = data\n return", "def load_data(city, month, day):\n #load the data of the specified city in a dataframe:\n df = pd.read_csv(CITY_DATA[city])\n\n #convert the type of data in 'Start Time' column to datetime:\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n #create new columns required to calculate time_stats:\n df['month'] = df['Start Time'].dt.month\n 
df['weekday'] = df['Start Time'].dt.day_name()\n df['hour'] = df['Start Time'].dt.hour\n\n #unless user input is all, filter by month:\n if month != 'all':\n month = months.index(month) + 1 #get the index of the month\n df = df[df['month'] == month]\n\n #uless user input is all, filter by weekday:\n if day != 'all':\n df = df[df['weekday'] == day.title()]\n\n\n return df.set_index(pd.Series([i for i in range(df.shape[0])])) #reset the indices of the filterd df", "def get_data(self):\n data = list(IgnitionRow.objects.all().order_by('-pub_date')[:self.num_ticks].values())\n two_hours = data[::-1]\n #two_hours = data\n num_players_data = [[max(min(elem['num_players_{}'.format(key)],50),0) for elem in two_hours] for key in self.keys]\n return num_players_data", "def load_data(city, month, day):\n # here i load the datak\n df=pd.read_csv(CITY_DATA[city])\n \n df['Start Time']=pd.to_datetime(df['Start Time'])\n \n df['month']=df['Start Time'].dt.month\n df['day_of_week']=df['Start Time'].dt.weekday_name\n df['hour']=df['Start Time'].dt.hour\n \n #filter by month\n if month!='all':\n month =months.index(month)+1\n df=df[df['month']==month]\n \n #filter by day of week\n if day!='all':\n df=df[df['day_of_week']==day.title()]\n \n return df", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n#changed 'weekday_name' to just 'weekday' which outputs the weekday as integer\n # extract month, day of week and hour from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['dow'] = df['Start Time'].dt.weekday\n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n month = months.index(month) + 1\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n# problem with the 'day'-filter, if a day (not 'all') is applied, the output is not right\n # filter by day of week if applicable\n if day != 'all':\n\n # filter by day of week to create the new dataframe\n day = days.index(day) + 1\n df = df[df['dow'] == day]\n\n return df", "def load_data(city, month, day):\n ##task0.4 now the user choices are all ready and saved, start preparing your data \n ## first, load the data of the selected city using the pd.read_csv command for the user specified city \n df = pd.read_csv(CITY_DATA[city])\n ## read the column start time as a date, to start extracting information from it\n df['Start Time']=pd.to_datetime(df['Start Time'])\n ##define a new column\"month\" as a function from start time, which reads the month from the date\n df['month'] = df['Start Time'].dt.month\n ## extract the day name from the start time, append a new column\"day_of_week\" to the dataframe \n df['day_of_week'] = df['Start Time'].dt.day_name()\n ## extract the hour of the start time and save to a new variable\"hour\" \n df['hour'] = df['Start Time'].dt.hour\n \n ## now, the panel data is complete with variables of interest created\n ## time to consider user filters on month and day!!\n ## for the month, if user choice is all, no changes to dataset\n ## if user specifies a month,filter the dataset column\"month\" with the selected value\n if month !=\"all\":\n df=(df[df['month']==int(month)])\n \n ## for the day column, if user doesnt choose a specific day and select\"all\", no change to df\n ## if the user chooses a day, filter the \"day_of_week\" column for the specified day\n if day !=\"All\":\n 
df=(df[df['day_of_week']==day])\n \n ## print the first five rows of data to confirm filters are applied\n ## return the data frame to start calculating statistics\n #print(df.head())\n return df", "def getLoadData(self):\n\n\t\t# Variables\n\t\turl = 'http://mis.nyiso.com/public/dss/nyiso_loads.csv' # Url with the data\n\t\tresponse = urllib2.urlopen(url) # Reading url\n\t\tload_data = csv.reader(response) # Convering data to csv format\n\t\tyear = self.helper.getYear() # Current Year\n\t\thourly_loads = [] # Stores the loads per hour\n\t\tdaily_loads = {} # Stores the loads per hour of a given day\n\t\tmonthly_loads = {} # Stores the loads per day of a given month\n\t\tyearly_loads = {} # Stores the monthly loads in a year\n\n\t\t# Converting data from csv to dictionary\n\t\tfor row in load_data:\n\n\t\t\t# Ignoring first row\n\t\t\tif row[1] != \"Month\" and row[2] != \"Day\" and row[3] != 'Hr1':\n\t\t\t\tmonth = int(row[1])\n\t\t\t\tday = int(row[2])\n\n\t\t\t\t# Getting hourly loads\n\t\t\t\tfor i in range(3,27):\n\t\t\t\t\ttry:\n\t\t\t\t\t\thourly_loads.append(int(row[i]))\n\t\t\t\t\t# If there is an error reading the load then generate a \n\t\t\t\t\t# random load value between 15000 and 25000\n\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\tpass\n\t\t\t\t\t\thourly_loads.append((randint(15000,25000)))\n\t\t\t\tdaily_loads[day] = hourly_loads\n\t\t\t\thourly_loads = []\n\t\t\t\tmonthly_loads[month] = daily_loads\n\t\t\t\tif self.helper.isEndOfMonth(month, day):\n\t\t\t\t\tdaily_loads = {}\n\n\t\tyearly_loads[year] = monthly_loads\n\n\t\treturn yearly_loads", "def test_data_preprocessing(raw_data): \r\n\r\n # get data output\r\n data_output = raw_data[['Submitby Date Time', 'Challenge Manager', 'Challenge Copilot', 'Posting Date Date', 'Track',\r\n 'Technology List', 'First Place Prize', 'Num Registrations', 'Total Prize']]\r\n with open('cache/extended_columns.pkl', 'rb') as f:\r\n extended_columns = pickle.load(f)\r\n with open('cache/num_date_columns.pkl', 'rb') as f:\r\n max_date_columns = pickle.load(f)\r\n \r\n data_output = class_binaryzation_for_test(data_output, extended_columns)\r\n try:\r\n data_output = date_separation1(data_output, max_num_columns=NUM_DATE_COLUMNS)\r\n except:\r\n data_output = date_separation2(data_output)\r\n data_output = money_digitalization(data_output)\r\n data_output = get_date_in_days(data_output)\r\n data_output['Days from Posting to Submit'] = data_output['Submitby Date Time Days from 2016'] \\\r\n - data_output['Posting Date Date Days from 2016'] \r\n\r\n return data_output", "def get_data(self):\n data = list(IgnitionRow.objects.all().order_by('-pub_date')[:self.num_ticks].values())\n two_hours = data[::-1]\n pct_flop_data = [[int(elem['pct_flop_{}'.format(key)]) for elem in two_hours] \n \tfor key in self.keys]\n pct_flop_data = [[min(elem, 100) for elem in arr] for arr in pct_flop_data] # Assume a max pot size of 2000 BBs\n return pct_flop_data", "def process_data(self):\n timer_start = time.time()\n # ensure self.df_events and self.df_locations are not None\n if self.df_events is None or self.df_locations is None:\n print(\"Missing data: either df_events or df_locations is None\")\n return\n # set start and end based on self.df_events if not already set\n if not self.start:\n self.start = self.df_events['event_time'].min()\n if not self.end:\n self.end = self.df_events['event_time'].max()\n print(f\"date range for events data is from {self.start} to {self.end}\")\n # create Grid object before processing any data\n grid = 
self.compute_grid_cells(self.df_locations)\n # clean and combine events and locations data\n df_data = self.combine_events_and_locations(grid)\n print(df_data.shape)\n # df_data.to_csv('../../../data_files/20210506_cleanedInputDataCumSum.csv', index=False)\n # df_data = pd.read_csv('../../../data_files/20210415_cleanedInputDataAprilCumSum.csv')\n # process data within grid class\n df_processed = grid.process_data(df_data, 'weekly')\n # df_processed = self.calculate_demand(df_processed)\n # df_processed.to_csv('../../../data_files/20210506_processedGridCellData.csv')\n # set df_demand to be df_processed\n df_processed.reset_index(inplace=True)\n df_processed = df_processed.astype({'date': 'str', 'avail_count': 'float', 'avail_mins': 'float', 'prob_scooter_avail': 'float', 'trips': 'float', 'adj_trips': 'float'})\n # make sure dates are within start and end dates\n start_date = str(iso8601.parse_date(self.start).date())\n end_date = str(iso8601.parse_date(self.end).date())\n df_processed = df_processed[(df_processed['date'] >= start_date) & (df_processed['date'] <= end_date)]\n self.set_demand(df_processed)\n timer_end = time.time()\n print('Elapsed time to process data:', (timer_end - timer_start)/60.0, 'minutes')", "def prepare_data(self,query,user=0,daytype=\"A\"):\n # check if input is a string, parse to an array\n if type(query) is str:\n query = geoutil.parse_coords_array(query)\n \n # load data\n if user == 0:\n data = taxi.loadCsv()\n else:\n data = taxi.loadRelated(user)\n \n # 0th row has only column names\n paths = list()\n\n # A for normal day, B for holiday, C for day before holiday\n # depending on the length of a query, set minimum length\n min_length = len(query)/2\n\n for x in range(1,len(data)):\n points = taxi.pointsListConverter(data[x][8])\n if data[x][6]==daytype and len(points)>min_length:\n # convert data to array of tuples of float values\n geopoints = geoutil.convert_points(points)\n if taxi.containing(geopoints,query):\n paths.append(geopoints)\n\n self.query = query\n # pass all paths and generate big array of float points\n try:\n allPoints = numpy.concatenate(paths)\n output_points = []\n for point in allPoints[:-1]:\n output_points.append({\n \"lat\":point[1],\n \"long\":point[0]\n })\n self.training = allPoints[:-1]\n return self.training,query\n except Exception as e:\n print(\"No paths found: \"+str(e))\n return [],query", "def extract_data():\n raw_data = pd.read_csv(\"../../../resource/DataVisualization/vaccinations.csv\")\n raw_data = raw_data[[\"location\", \"date\", \"people_fully_vaccinated_per_hundred\"]]\n raw_data.date = pd.to_datetime(raw_data.date, format=\"%Y-%m-%d\")\n min_date = raw_data.date.min()\n raw_data.date = raw_data.date-min_date\n raw_data.date = pd.Series([x.days for x in raw_data.date])\n raw_data.drop(raw_data.loc[raw_data.people_fully_vaccinated_per_hundred.isnull()].index,\n axis=0, inplace=True)\n raw_data[\"people_fully_vaccinated_per_hundred\"] /= 100\n\n data_dict = dict()\n for country in raw_data.location.unique():\n if len(raw_data.loc[raw_data.location == country]) >= 100:\n tmp_data = raw_data.loc[raw_data.location == country]\n tmp_data.drop(\"location\", axis=1, inplace=True)\n data_dict[country] = {\"data\":tmp_data}\n else:\n raw_data.drop(raw_data.loc[raw_data.location ==\n country].index, inplace=True)\n return data_dict, min_date, raw_data", "def _dataframe_preprocess(self):\n # 1. 
add baisc feature like date, time in day, ....\n if self.data_type != 'porto':\n self.df['TIMESTAMP'] = self.df.apply(lambda df: df['TIMESTAMPS'][0], axis=1)\n self.df['TIME'] = pd.to_datetime(self.df['TIMESTAMP'], unit='s', utc=True)\n \n self.df.TIME = self.df.TIME.dt.tz_convert(self.timezone)\n # 2. group df for specific driver analysis\n self.grouped_df = self.df.groupby('LABEL')\n if self.count_od_info:\n if 'SD' not in self.df.columns:\n self._add_OD_info()\n self.grouped_od = self.df.groupby('SD')", "def one_hour_ticker(*args):\n markets = fetch_markets()\n map(populate_one_hour_data, markets)\n return", "def getDataForLoadComparisons(self):\n\n\t\t# Variables\n\t\tload_data = self.getLoadData() \n\t\tvalues = [] \n\t\tinner_dict = {}\n\t\touter_dict = {}\n\t\tfinal_data = []\n\t\tyesterday = self.helper.getYesterday()\n\t\tkey = self.helper.getYear() + self.helper.getMonth() + self.helper.getDay() + \"-loadData\"\n\t\tdata = load_data[yesterday[0]][int(yesterday[1])][int(yesterday[2])]\n\t\tdates = (['12:00 AM','1:00 AM','2:00 AM','3:00 AM','4:00 AM','5:00 AM',\n\t\t\t'6:00 AM','7:00 AM','8:00 AM','9:00 AM','10:00 AM','11:00 AM',\n\t\t\t'12:00 PM','1:00 PM','2:00 PM','3:00 PM','4:00 PM','5:00 PM',\n\t\t\t'6:00 PM','7:00 PM','8:00 PM','9:00 PM','10:00 PM','11:00 PM'])\n\n\t\t# Populating values array\n\t\tfor i in range(0,len(data)):\n\t\t\tinner_dict['label'] = dates[i]\n\t\t\tinner_dict['value'] = data[i]\n\t\t\tvalues.append(inner_dict)\n\t\t\tinner_dict = {}\n\n\t\t# Populating the final_data array and returning it\n\t\touter_dict['key'] = key\n\t\touter_dict['values'] = values\n\t\tfinal_data.append(outer_dict)\n\n\t\treturn final_data", "def select_hours(self, lhours):\n sel = []\n for i in range(self.dataset.shape[0]):\n stime = time.localtime(np.int32(self.dataset[i][2]))\n hour = stime[3]\n for ih in lhours:\n ihour, fhour = ih\n if ihour <= hour < fhour:\n sel.append(i)\n data = STData(self.wpath, self.city, self.application)\n data.dataset = self.dataset[sel]\n return data", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month, day of week and hour from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n\n # filter_choosed by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = [\"january\", \"february\", \"march\", \"april\", \"may\", \"june\"]\n month = months.index(month) + 1\n\n # filter_choosed by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter_choosed by day of week if applicable\n if day != 'all':\n # filter_choosed by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def process_data(filename, skiprow=0):\n df = pd.read_csv(filename, encoding='big5', header=None, skiprows=skiprow)\n # drop 測站\n df.drop(1, axis=1, inplace=True)\n print('Data Loaded, preview:')\n print(df.head())\n\n data = {}\n # group data by date\n for name, ddf in df.groupby(0):\n date = [s.zfill(2) for s in name.split('/')]\n month = date[1]\n\n # drop the date\n ddf.drop(0, axis=1, inplace=True)\n\n # set index as the measure\n ddf.set_index(2, drop=True, inplace=True)\n\n # set column as month-day-hour\n ddf.columns = ['-'.join(date[1:]+[str(i).zfill(2)]) 
for i in range(24)]\n\n # concatenate\n if month in data:\n data[month] = pd.concat([data[month], ddf], axis=1)\n else:\n data[month] = ddf\n\n # sort the columns by datetime\n for key in data.keys():\n data[key] = data[key][data[key].columns.sort_values()]\n\n print('\\nShow data index:')\n print(data['01'].columns)\n\n return data", "def load_data(city, month, day):\n while month != \"\":\n # load data file into a dataframe\n filename = CITY_DATA[city]\n df = pd.read_csv(filename)\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n # df['day_of_week'] = df['Start Time'].dt.day_name()\n\n\n try: df['day_of_week'] = df['Start Time'].dt.weekday_name\n except: df['day_of_week'] = df['Start Time'].dt.day_name()\n else: df['day_of_week'] = df['Start Time'].dt.weekday\n \n \n \n df['hour'] = df['Start Time'].dt.hour\n\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n # months = ['january', 'february', 'march', 'april', 'may', 'june','july','august','september','october','november','december']\n month = int(months.index(month)) + 1\n \n # filter by month to create the new dataframe\n df = df.loc[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df.loc[df['day_of_week'] == day.title()]\n \n return df", "def load_hourly_demand(path):\n output = {}\n\n data = pd.read_csv(path)\n\n for idx, item in data.iterrows():\n\n hour = int(item['hour'])\n\n output[hour] = item['share']\n\n return output", "def get_hours():\n\n print('This program calculates fees paid per hour.Enter hour in H:m using the 24 hour format.')\n\n today = datetime.today()\n start_time = input('Enter time started in H:m format: ')\n end_time = input('Enter time ended in H:m format: ')\n task = input('Enter task name: ')\n description = input('Give a brief description of task: ')\n\n # start_time_str = start_time\n start_time = datetime.strptime(start_time, '%H:%M').time()\n end_time = datetime.strptime(end_time, '%H:%M').time()\n\n # print(start_time_object, end_time_object)\n\n time_elapsed = datetime.combine(\n datetime.today(), end_time) - datetime.combine(date.today(), start_time)\n total_seconds = time_elapsed.seconds\n # print(total_seconds)\n hours = total_seconds/3600\n\n print('Number of hours spent on task is ', hours, 'hours.')\n\n get_price(hours)\n save_to_csv(today, task, description, hours, start_time, end_time)", "def calc_hours(file):\n data = pd.read_csv(file, header=0)\n \n # tokenize & lemmatize\n steps = data['steps'].apply(lambda steps: tokenizer.tokenize(steps)).apply(lambda steps: [lemmatizer.lemmatize(word, pos='n') for word in steps])\n \n # find index of those containing hour\n target = steps.apply(lambda x: any(i in h for i in x))\n tar_ind = target[target].index\n \n # extract the word/number before hour\n hours = steps[tar_ind].apply(lambda x: [x[i-1:i+1][0] for i in range(len(x)) if x[i] in h]).rename(\"hours\")\n \n # standarize representation ('one' -> 1)\n # sum hours\n hours = hours.apply(lambda x: sum([standarize(i) for i in x if standarize(i)])).replace(0, np.NaN)\n \n # merge & save\n data = pd.merge(data, hours, how='left', right_on='hours', left_index=True, right_index=True)\n data.to_csv(file, header=True, index=False)", "def get_data(self):\n data = 
list(IgnitionRowPredictionOLS.objects.all().order_by('-pub_date')[:self.num_ticks].values())\n two_hours = data[::-1]\n num_players_data = [[elem['num_players_{}'.format(key)] for elem in two_hours] for key in self.keys]\n return num_players_data", "def _calc_time(time_lines: list) -> np.ndarray:\n time = [time_to_fraction_hour(line.split()[1]) for line in time_lines]\n return np.array(time)", "def load_data(city, month, day):\n try:\n df = pd.read_csv(CITY_DATA[city])\n \n df['Start Time'] = pd.to_datetime(df['Start Time'], errors='coerce')\n df['End Time'] = pd.to_datetime(df['End Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name \n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = MONTH_LIST.index(month) + 1 \n \n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()] \n\n return df\n except Exception as e:\n print('Couldn\\'t load the file, as an Error occurred: {}'.format(e))", "def load_data(city, month, day):\n try:\n df = pd.read_csv(CITY_DATA[city])\n \n df['Start Time'] = pd.to_datetime(df['Start Time'], errors='coerce')\n df['End Time'] = pd.to_datetime(df['End Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name \n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = MONTH_LIST.index(month) + 1 \n \n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()] \n\n return df\n except Exception as e:\n print('Couldn\\'t load the file, as an Error occurred: {}'.format(e))", "def data_preparation(self) -> None:\n self.logger.info('data cleaning')\n self.logger.info('num of secs: {}, num of ipo_dates: {}, num of secs with prices: {}'.format(\n len(self.data),\n len(self.ipo_dates),\n len(self.prices)\n ))\n excluded = []\n excluded = [i.lower() for i in excluded]\n self.logger.info(f'number of excluded: {len(excluded)}')\n for i in excluded:\n self.data.pop(i)\n for s in self.data:\n # columns with empty assets sum (empty columns and other situations)\n self.data[s].dropna(axis='columns', how='any', subset=['A_0'], inplace=True)\n # columns with descriptions (polish and english names of values)\n self.data[s].drop(self.data[s].columns[[0, 1]], inplace=True, axis=1)\n\n self.logger.info(f'number of secs after cleaning: {len(self.data)}')\n data_list = [k for k in self.data.values()]\n self.uber_data = pd.concat(data_list, ignore_index=True, axis=1)\n self.uber_data = self.uber_data.transpose()\n self.uber_data = self.uber_data.loc[:, pd.notnull(self.uber_data.columns)]", "def read_and_filter_data(self):\n if self.use_fake_data:\n self.df = HeatStrokeDataFiller.create_fake_test_data()\n return\n\n logger.info(\"Reading and cleaning data from file: %s...\" % os.path.basename(self.excel_file))\n self.df = pd.read_excel(self.excel_file, 
sheetname=self.spreadsheet_name)\n logger.debug(\"Fixing time fields...\")\n self.fix_time_fields()\n logger.debug(\"Filling missing data...\")\n self.fill_missing()\n logger.debug(\"Fixing fields...\")\n self.fix_fields()\n logger.debug(\"Filtering data features...\")\n self.filter_data()\n logger.debug(\"Generating negative data...\")\n self.make_and_append_negative_data()\n\n logger.debug(\"Casting to float...\")\n self.df = self.df.astype(float)", "def _get_time(self) -> None:\n self.data[\"time\"] = np.zeros(len(self.data[\"yyyymmdd\"]), dtype=object)\n \n for idx, (yyyymmdd, hhmmss) in enumerate(zip(self.data[\"yyyymmdd\"], self.data[\"hhmmss\"])):\n year, month, day = yyyymmdd.split(\"/\")\n hour, minute, second = hhmmss.split(\":\")\n self.data[\"time\"][idx] = datetime(int(year), int(month), int(day), int(hour), int(minute), int(second))\n \n del self.data[\"yyyymmdd\"]\n del self.data[\"hhmmss\"]", "def get_data(self):\n data = list(IgnitionRowPredictionCVX.objects.all().order_by('-pub_date')[:self.num_ticks].values())\n two_hours = data[::-1]\n num_players_data = [[elem['num_players_{}'.format(key)] for elem in two_hours] for key in self.keys]\n return num_players_data", "def interpolatehour(self, hourslice, starttime, data):\n\n times = self.times[hourslice]\n # Hacky solution for boundary crossers\n if times[1] == 0:\n times[0] -= 24\n if times[2] == 0:\n times[0] -= 24\n times[1] -= 24\n elif times[3] == 0:\n times[3] += 24\n times[4] += 24\n elif times[4] == 0:\n times[4] += 24\n f = PchipInterpolator(times,\n data,\n extrapolate=False,\n axis=0)\n\n return f(starttime)", "def get_hourly(self):\n pass", "def load_data(city, month, day):\n # Read csv file for city\n df = pd.read_csv(city)\n ## Make sure numbers/dates/etc. are treated appropriately for our needs\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n #df[['Start Time', 'End Time']] = df[['Start Time', 'End Time']].apply(pd.to_datetime)\n # Filter dates/times where applicable\n ## Define tuples for months/days so we can index as numerals\n month_tuple = ('january', 'february', 'march', 'april', 'may', 'june')\n day_tuple = ('monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday')\n if month != '':\n #Index months as integers; in this case we need to add 1 to each to convert them\n month_int = month_tuple.index(str(month.lower()))+1\n #Create new columns indicating the start and end months respectively\n #This preserves the original data while still allowing us to filter\n df['Start Month'] = df['Start Time'].dt.month\n df['End Month'] = df['End Time'].dt.month\n df = df[df['Start Month']== month_int]\n return df\n elif day != '':\n #Index days as integers\n day_int = day_tuple.index(str(day.lower()))\n #Create new columns indicating the start and end weekdays respectively\n #This preserves the original data while still allowing us to filter\n df['Start Weekday'] = df['Start Time'].dt.dayofweek\n df['End Weekday'] = df['End Time'].dt.dayofweek\n df = df[df['Start Weekday']== day_int]\n return df\n else:\n return df", "def _interpolate_meteorological_data(dset, data, rundate):\n rundate = datetime(rundate.year, rundate.month, rundate.day)\n for field, station in [(f, f[4:]) for f in data.keys() if f.startswith(\"met_\")]:\n log.debug(f\"Meteorological data available for station {station}\")\n\n met_time = data[field].pop(\"met_time\")\n flat_list = [item for sublist in met_time for item in sublist]\n met_time_float = np.array([(flat_list[i] - 
rundate).total_seconds() for i in range(0, len(flat_list))])\n met_time_unique, met_index = np.unique(met_time_float, return_index=True)\n\n diff = len(met_time_float) - len(met_time_unique)\n if diff > 0:\n log.dev(f\"Removed duplicate met data for station {station}\")\n log.dev(\"Do this for the actual obs data also!\")\n if len(met_time_unique) == 1:\n for met_type in data[field].keys():\n data[field][met_type] = np.repeat(data[field][met_type][0], dset.num_obs)\n continue\n\n # Extrapolation one month before/after\n # (this is overkill, most of these values will be removed later when taking the diagonal)\n min_time = min(met_time_unique) - 31 * 86400\n max_time = max(met_time_unique) + 31 * 86400\n met_time_unique = np.hstack((np.array(min_time), met_time_unique, np.array(max_time)))\n\n for met_type in data[field].keys():\n met_data_array = data[field][met_type]\n flat_list = [item for sublist in met_data_array for item in sublist]\n met_data_array = np.array([flat_list[i] for i in met_index])\n met_data_array = np.hstack((met_data_array[0], met_data_array, met_data_array[-1]))\n data[field][met_type] = interpolation.interpolate(\n met_time_unique, met_data_array, dset.obs_time, kind=\"cubic\"\n )\n\n return data", "def load_data(city, month, day ,city_num, month_num, day_num):\r\n try:\r\n df = pd.read_csv(CITY_DATA[city])\r\n df['Start Time'] = pd.to_datetime(df['Start Time'])\r\n df['End Time'] = pd.to_datetime(df['End Time'])\r\n df['month'] = df['Start Time'].dt.month\r\n df['day_of_week'] = df['Start Time'].dt.weekday_name\r\n df['hour'] = df['Start Time'].dt.hour\r\n\r\n # filter by month if applicable\r\n if month != 'all':\r\n df = df[df['month'] == month_num]\r\n # filter by day of week if applicable\r\n if day != 'all':\r\n # filter by day of week to create the new dataframe\r\n\r\n df = df[df['day_of_week'].str.contains(day.title())]\r\n return df\r\n except Exception as e:\r\n print('An exception has been occurred during loading data: {}'.format(e))", "def dataset_extract_features_from_date(dataset,date_feature): \n dataset['dayofmonth'] = dataset[date_feature].dt.day\n dataset['dayofyear'] = dataset[date_feature].dt.dayofyear \n dataset['dayofweek'] = dataset[date_feature].dt.dayofweek\n dataset['month'] = dataset[date_feature].dt.month\n dataset['year'] = dataset[date_feature].dt.year\n dataset['weekofyear'] = dataset[date_feature].dt.weekofyear\n dataset['is_month_start'] = (dataset[date_feature].dt.is_month_start).astype(int)\n dataset['is_month_end'] = (dataset[date_feature].dt.is_month_end).astype(int)\n return dataset", "def getHourlyTemp(self, keyword, scale):\n\n\t\tweather_data = self.getHourlyWeatherFromCSV(keyword, scale, \"temperature\")\n\t\ttemp_values = [] # Array that will contain all the temperature data\n\t\ttemp_data = {} # Dictionary of temperature data\n\n\t\t# Getting temperature data\n\t\tfor data in weather_data:\n\t\t\ttemp_data[\"x\"] = self.helper.getDateInEpoch(data[\"date\"])\n\t\t\ttemp_data[\"y\"] = float(data[\"temperature\"].split(\"°\")[0].split(\" \")[0])\n\t\t\ttemp_values.append(temp_data)\n\t\t\ttemp_data = {}\n\n\t\treturn temp_values", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.dayofweek\n\n return df", "def parse_weather(data: DataFrame) -> List[WeatherData]:\n parsed_results = []\n\n for index, row in 
data.iterrows():\n date = sqlite3.Date(index.year, index.month, index.day)\n item = WeatherData(\n date=date,\n average_temp=celsius_to_fahr(row.get('tavg', 0)),\n precipitation=row.get('prcp', 0),\n )\n parsed_results.append(item)\n return parsed_results", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA [city])\n \n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n \n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['hour'] = df['Start Time'].dt.hour\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n \n # Do the filter below\n # no filter is applied\n if month == 0 and day == 0:\n return df\n # only filter by day\n elif month == 0:\n df = df[df['day_of_week']==day]\n # only filter by month\n elif day == 0:\n df = df[df['month']== month]\n else:\n df = df[df['day_of_week']==day]\n df = df[df['month']== month]\n \n return df", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n # drop the unused 'Unnamed' column\n df = df.drop(\"Unnamed: 0\", axis=1)\n # convert the Start Time column to datetime\n df[\"Start Time\"] = pd.to_datetime(df[\"Start Time\"])\n # extract month, day of week and hour from Start Time to create new columns\n df[\"month\"] = df[\"Start Time\"].dt.month_name()\n df[\"day\"] = df[\"Start Time\"].dt.day_name()\n df[\"hour\"] = df[\"Start Time\"].dt.hour.astype(str)\n\n # filter by month if applicable\n if month != \"All\":\n # filter by month to create the new dataframe\n df = df.loc[df[\"month\"] == month]\n\n # filter by day of week if applicable\n if day != \"All\":\n # filter by day of week to create the new dataframe\n df = df.loc[df[\"day\"] == day]\n\n return df", "def hours(input=None):\n return get(input).hours", "def get_data(self):\n data = list(IgnitionRowPredictionTobit.objects.all().order_by('-pub_date')[:self.num_ticks].values())\n two_hours = data[::-1]\n num_players_data = [[elem['num_players_{}'.format(key)] for elem in two_hours] for key in self.keys]\n return num_players_data", "def _get_weather_data(self, lat, long):\n return {}\n try:\n # get the data\n forecast = self.ds.get_forecast(\n lat, long,\n exclude=[weather.HOURLY, weather.MINUTELY,\n weather.DAILY, weather.ALERTS, weather.FLAGS])\n\n # add lat & long to the hourly weather data for composite key in db\n data = forecast.currently\n data.latitude = lat\n data.longitude = long\n data = data.__dict__\n data.pop(\"time\")\n return data\n except Exception as e:\n print(e)\n return None", "def split_source_at_hours(self, hours):\n # This dictionary will be indexed by hours and refer to each source\n hourly_sources = {}\n for hour in hours:\n hourly_df = self.data[self.data.index.hour == hour]\n hourly_sources[hour] = Source(self.name, hourly_df,\n self.source_type)\n return hourly_sources", "def prepare_data(filename='data/DOT_timeSeries.csv'):\n\n # read data file into pandas dataframe\n df = pd.read_csv(filename)\n\n # extract unwanted 'countries' from dataframe\n countries = ['Europe', 'Emerging and Developing Europe', 'Emerging and Developing Asia',\n 'Middle East, North Africa, and Pakistan', 'Export earnings: nonfuel',\n 'Sub-Saharan Africa', 'Export earnings: fuel', 'Western Hemisphere',\n 'World', 'Special Categories', 'Advanced Economies', 'CIS',\n 'Emerging and Developing Economies']\n for country in countries:\n df = extract_relevant_rows(df, column_name='Country Name', column_value=country, 
not_equal=True)\n df = extract_relevant_rows(df, column_name='Counterpart Country Name', column_value=country, not_equal=True)\n\n # extract exports only from data\n exports = extract_relevant_rows(df, column_name='Indicator Code', column_value='TXG_FOB_USD')\n # extract value attributes only from exports\n export_values = extract_relevant_rows(exports, column_name='Attribute', column_value='Value')\n\n return export_values", "def DataLoader():\n #importing data\n House_Prices_Uncleaned = pd.read_csv(\"zillow_data/Zip_zhvi_uc_sfrcondo_tier_0.33_0.67_sm_sa_mon.csv\")\n #Cleaning house prices data\n\n House_Prices=pd.DataFrame(House_Prices_Uncleaned[\"RegionName\"][House_Prices_Uncleaned[\"CountyName\"]==\"New York County\"])\n\n House_Prices[\"Price\"]=pd.DataFrame(House_Prices_Uncleaned[\"2020-09-30\"])\n\n House_Rent_Uncleaned= pd.read_csv(\"zillow_data/Zip_ZORI_AllHomesPlusMultifamily_SSA.csv\")\n\n #Cleaning house rent data\n House_Rent=pd.DataFrame(House_Rent_Uncleaned[\"RegionName\"])\n House_Rent[\"Rent\"]=pd.DataFrame(House_Rent_Uncleaned[\"2020-09\"])\n\n return House_Prices, House_Rent", "def format_data(self, raw_data):\n opz = raw_data.copy()\n opz['datetime'] = pd.to_datetime(opz['Datum-tijd'], format='%Y-%m-%dT%H:%M:%SZ')\n opz.drop(['Datum-tijd'],axis=1, inplace=True)\n opz['dag']=opz['datetime'].dt.day\n opz['tijd'] = opz['datetime'].dt.time\n #voeg open/dicht data toe en bepaal momenten waarop dit wisselt\n opz['Opzetstuk Noord (°)'] = opz['Opzetstuk Noord (°)'].str.replace(',', '.').astype(float)\n opz['Opzetstuk Zuid (°)'] = opz['Opzetstuk Zuid (°)'].str.replace(',', '.').astype(float)\n opz['Opzetstuk Noord (°)'].fillna(opz['Opzetstuk Zuid (°)'], inplace=True)\n opz['Opzetstuk Zuid (°)'].fillna(opz['Opzetstuk Noord (°)'], inplace=True)\n return opz", "def hourly_table(self):\n htable = [0 for i in range(24)]\n for i in range(self.dataset.shape[0]):\n stime = time.localtime(np.int32(self.dataset[i][2]))\n evtime = stime[3]\n htable[evtime] += 1\n return htable", "def load_data(city, month, day):\n #create the DataFrame\n #I'll be honest, I was struggling with this bit of code so I searched the internet and found what I needed to get started.\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week and hour from Start Time to create new columns. 
New columns are needed for filtering.\n df['month'] = df['Start Time'].dt.month\n df['day'] = df['Start Time'].dt.day\n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n month = MONTHS.index(month) + 1\n df = df[ df['month'] == month ]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[ df['day'] == day.title()]\n\n return df", "def load_data(city, month, day):\n\n print(\"\\nThe program is loading the data for the filters of your choice.\")\n start_time = time.time()\n\n\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['Month'] = df['Start Time'].dt.month\n df['Weekday'] = df['Start Time'].dt.weekday_name\n df['Start Hour'] = df['Start Time'].dt.hour\n\n # filter the data according to month and weekday into two new DataFrames\n df = df[df['Month'] == (months.index(month)+1)]\n df = df[df['Weekday'] == day.title()]\n\n print('-'*40)\n\n return df", "def process(cls, path, ti, tf):\n data = cls(path, ti, tf)\n id0 = data.id0\n id1 = data.id1\n\n # new data representing first section of original data\n # time_new is adjusted to start at zero\n data.time = data.time[id0:id1 + 1] - data.time[id0:id1 + 1].min()\n data.tc1 = data.tc1[id0:id1 + 1]\n data.tc2 = data.tc2[id0:id1 + 1]\n data.tc3 = data.tc3[id0:id1 + 1]\n data.tc4 = data.tc4[id0:id1 + 1]\n\n return data", "def get_hours_by_weekday(self, cr, uid, tpl_id, day_no, context=None):\n\n delta = timedelta(seconds=0)\n tpl = self.browse(cr, uid, tpl_id, context=context)\n for worktime in tpl.worktime_ids:\n if int(worktime.dayofweek) != day_no:\n continue\n\n fromHour, fromSep, fromMin = worktime.hour_from.partition(':')\n toHour, toSep, toMin = worktime.hour_to.partition(':')\n if len(fromSep) == 0 or len(toSep) == 0:\n raise orm.except_orm(\n 'Invalid Data', 'Format of working hours is incorrect')\n\n delta += (\n datetime.strptime(toHour + ':' + toMin, '%H:%M') -\n datetime.strptime(fromHour + ':' + fromMin, '%H:%M')\n )\n\n return float(delta.seconds / 60) / 60.0", "def __process_data(self, dataF):\n\n dataF['created_time'] = pd.to_datetime(dataF['created_time'])\n dataF['char_cnt'] = dataF['message'].str.len()\n dataF['month'] = dataF['created_time'].dt.month\n dataF['week'] = dataF['created_time'].dt.week\n\n return dataF", "def full_load_hours(dh: DataHandler):\n gen_df = dh.get(\"o_supply\")\n gen_df.index.names = change_tec_lvl_name_to_alltec(gen_df.index.names)\n cap_df = dh.get(\"o_capa\")\n\n # In not all hours of the year are simulated, generation needs to be scaled by sc\n sc = 8760 / len(gen_df.index.get_level_values(\"t\").unique())\n cum_gen_df = gen_df.groupby([\"alltec\", \"allvin\", \"r\"], dropna=False).sum() * sc\n\n # Calculate full load hours\n flh_df = cum_gen_df.divide(cap_df)\n\n return flh_df", "def load_data(city, month, day):\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n days = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time']) # convert the Start Time column to datetime\n df['month'] = df['Start Time'].dt.month # extract month from start time to create a new column\n df['day_of_week'] = df['Start Time'].dt.day_name() # extract day from start time to create a new column\n\n if month in months and day == 'all': # filter the df only by month if applicable\n month = convert_to_int(months, month)\n df = 
df.loc[df['month'] == month]\n \n if month == 'all' and day in days : # filter the df only by day of week if applicable\n df = df.loc[df['day_of_week'] == day.title()]\n \n if month in months and day in days:\n # use the index of the months list to get the corresponding month's int\n month = convert_to_int(months, month)\n\n df = df.loc[df['month'] == month] # first filter the df by month\n df = df.loc[df['day_of_week'] == day.title()] # then filter the df by day of week\n\n return df # no filter applied", "def convertData(data):\n for candle in data['candles']:\n candle['date'],candle['time'] = convertToEST(candle['datetime'])\n\n return data", "def get_data(temp_estimate_source='historic'):\n\n print 'get data from files'\n load = process_load_data(datafoldername+loadfilename_train)\n load_test = process_load_data(datafoldername+loadfilename_test)\n temp = process_temp_data(datafoldername+tempfilename_train)\n holidays = process_holiday_data(datafoldername+holidayfilename)\n\n print 'merge load with temp data'\n X_train_df = load.merge(temp, on='datetime', how='left')\n X_test_df = load_test.merge(temp, on='datetime', how='left')\n\n print 'estimate missing temps'\n # find rows with missing temperatures\n missingtemp = X_test_df[X_test_df.isnull().any(axis=1)][['datetime', 'zone_id']].copy()\n\n # source estimates for missing periods\n if temp_estimate_source == 'arima':\n # use preprocessed arima estimates\n estimatedtemps = process_arima_temp_data(datafoldername+arimafilename)\n elif temp_estimate_source == 'actuals':\n # use actual temperatures - as provided after conclusion of kaggle competition\n estimatedtemps = process_temp_data(datafoldername+tempfilename_solution)\n else:\n # use means of historical temps at same day/time.\n estimatedtemps = get_estimated_temps(missingtemp[['datetime']].drop_duplicates(), temp)\n\n # merge estimates against missing rows, and use to update original dataframe in place\n replacementtemps = missingtemp.merge(estimatedtemps, left_on='datetime', right_on='datetime', how='left')\n replace_unknown_temps(X_test_df, replacementtemps)\n\n print 'merge in holiday dates'\n X_train_df = X_train_df.merge(holidays, on='datetime', how='left')\n X_train_df['holiday'].fillna(0, inplace=True)\n X_test_df = X_test_df.merge(holidays, on='datetime', how='left')\n X_test_df['holiday'].fillna(0, inplace=True)\n\n print 'add datetime categorical variables'\n X_train_df = add_datetime_categories(X_train_df)\n X_test_df = add_datetime_categories(X_test_df)\n\n return X_train_df, X_test_df", "def load_data(city, month, day):\n df=pd.read_csv(CITY_DATA[city])\n\n df['Start Time']=pd.to_datetime(df['Start Time'])\n\n df['month'] = df['Start Time'].dt.month\n\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n df['hour'] = df['Start Time'].dt.hour\n\n if month != 'all':\n months = ['january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october', 'november', 'december']\n month = months.index(month) + 1\n df = df[df['month'] == month]\n\n if day != 'all':\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def quater_hour_ticker(*args):\n markets = fetch_markets()\n map(populate_quater_hour_data, markets)\n return", "def collect_data(self,\n day_selection,\n exposure_schedule=[1.0],\n year_selection=[0],\n units=[\"SED\"],\n bin_width=None):\n\n # this subroutine handles keyword inputs (monthly, seasonal, etc)\n self.day_selection, self.day_input_flt, self.day_nonstring = str2daysofyear(day_selection)\n\n self.exposure_schedule = 
exposure_schedule\n\n self.year_selection = year_selection\n\n if units is not None :\n self.units = units\n \n if bin_width is not None :\n self.bin_width = bin_width\n\n self = self.interpret_parameters()\n\n ############################################################################\n\n lengths = {'day_selection' : len(self.day_selection),\n 'exposure_schedule' : len(self.exposure_schedule),\n 'year_selection' : len(self.year_selection),\n 'units' : len(self.units),\n 'bin_width' : len(self.bin_width)}\n\n self.num_hists = max(lengths.items(), key=lambda x: x[1])[1]\n assert all(x == self.num_hists or x == 1 for x in lengths.values()), (\n \"Inputs must be lists of length 1 or num_hists\")\n \n self.iterators = [x[0] for x in lengths.items() if x[1]==self.num_hists]\n\n\n self.hist_specs = []\n\n for i in range(self.num_hists) :\n hist_spec = {\n 'day_selection' : self.day_selection[0],\n 'exposure_schedule' : self.exposure_schedule[0],\n 'year_selection' : self.year_selection[0],\n 'units' : self.units[0],\n 'bin_width' : self.bin_width[0]}\n for x in self.iterators :\n hist_spec[x] = self.__dict__[x][i]\n self.hist_specs = self.hist_specs + [hist_spec]\n \n \n # find unique years to be loaded (probably all years but have to check)\n unique_years = set(self.year_selection[0])\n if len(self.year_selection) > 1 :\n for i in range(1,len(self.year_selection)) :\n unique_years.update(self.year_selection[i])\n unique_years = sorted(unique_years)\n\n # declare empty hists\n self.hists = [None for x in range(self.num_hists)]\n\n for i in range(len(unique_years)) :\n year = unique_years[i]\n print(\"Processing year \"+str(year)) #should use logging, don't yet know how\n dataset=nc.Dataset(self.src_directory+self.src_filename_format.replace('yyyy',str(year))) \n dataset.set_auto_mask(False) #to get normal arrays (faster than default masked arrays)\n\n if i == 0 :\n # TODO: this should also be done by some initial dataset analysis, but that's a drastic\n # design overhaul\n self.lat = dataset['lat'][:]\n self.lon = dataset['lon'][:]\n\n # now to determine the unique days for the specific year\n unique_days = set()\n for j in range(self.num_hists) :\n if year in self.hist_specs[j]['year_selection'] :\n unique_days.update(self.hist_specs[j]['day_selection'])\n unique_days = sorted(unique_days)\n\n # TODO: when metadata fixed, update this to actually interpret dates (cftime)\n # reformat to index for netCDF\n nc_day_sel = [False for i in range(365*24)] \n # reshape false array to have first dimension 24 (hours in day)\n nc_day_sel = assert_data_shape_24(nc_day_sel) \n # set the appropriate days as true\n nc_day_sel[:,np.array(unique_days)-1] = True \n # correct for leap years (skip feb 29)\n if year % 4 == 0 :\n nc_day_sel = np.concatenate(\n (nc_day_sel[:,0:59],np.full((24,1),False),nc_day_sel[:,59:]),axis=1)\n # flatten time_subset array back to one dimension\n nc_day_sel = nc_day_sel.flatten(order='F')\n\n #load data\n data_year = assert_data_shape_24(dataset['UV_AS'][nc_day_sel,:,:])\n\n #sort data into histograms\n for j in range(self.num_hists) :\n if year in self.hist_specs[j]['year_selection'] :\n sub_day_sel = [ True if x in self.hist_specs[j]['day_selection'] \n else False for x in unique_days ]\n temp_data = data_year[:,sub_day_sel,:,:]\n\n # Apply the exposure schedule, differently for doses vs intensity\n if self.hist_specs[j]['units'] in [\"SED\",\"J m-2\",\"UVIh\"] :\n # if calculating doses\n print(' Calculating doses')\n temp_data = np.sum(np.reshape(\n 
self.hist_specs[j]['exposure_schedule'],[24,1,1,1]) * temp_data,axis=0)\n # more complex when doing intensity\n else :\n # assume elsewise calculating intensity (i.e. UV-index) then limit data selection\n # to schedule (remembering that default schedule is just ones)\n print(' Slicing data with exposure schedule')\n # select only those hours with nonzero entry in exposure schedule\n temp_data = temp_data[self.hist_specs[j]['exposure_schedule'] != 0,:,:,:]\n # select nonzero values from exposure schedule\n exposure_schedule_nonzero = self.hist_specs[j]['exposure_schedule'][\n self.hist_specs[j]['exposure_schedule'] != 0]\n # if any nonzero entries aren't 1, multiply data accordingly\n if (exposure_schedule_nonzero != 1).any() :\n temp_data *= np.reshape(exposure_schedule_nonzero,[len(exposure_schedule_nonzero),1,1,1])\n # recombine first two dimensions (hour and day) back into time ready for histogram\n temp_data = assert_data_shape_24(temp_data,reverse=True) \n\n # now multiply data by conversion factor according to desired untis\n # TODO: Should expand upon this in reference files\n temp_data *= {\"SED\":0.9, \"J m-2\":90, \"UVIh\":1, \"UVI\":1, \"W m-2\":0.025, \"mW m-2\":25}[self.hist_specs[j]['units']]\n\n # if this is the first iteration, declare a hist\n if 'num_bins' not in self.hist_specs[j] :\n # seems like useful metadata to know bin n and edges\n self.hist_specs[j]['num_bins'] = int(np.nanmax(temp_data) // self.hist_specs[j]['bin_width'] ) + 2\n self.hist_specs[j]['bin_edges'] = (np.array(range(self.hist_specs[j]['num_bins']+1))\n - 0.5) * self.hist_specs[j]['bin_width'] \n # this form allows for weird custom bin edges, but probably will never use that\n self.hist_specs[j]['bin_centers'] = (self.hist_specs[j]['bin_edges'][:-1] \n + 0.5 * np.diff(self.hist_specs[j]['bin_edges']))\n\n # TODO: think about possible cases where dimensions could differ\n self.hists[j]=np.zeros([self.hist_specs[j]['num_bins'],\n np.shape(temp_data)[-2],np.shape(temp_data)[-1]], dtype=np.int16)\n\n else :\n new_num_bins = int(np.nanmax(temp_data) // self.hist_specs[j]['bin_width']) + 2 - self.hist_specs[j]['num_bins']\n # check if new data requires extra bins in pix_hist\n if new_num_bins > 0 :\n # append zeros to pix hist to make room for larger values\n self.hists[j] = np.concatenate((self.hists[j],np.zeros(\n [new_num_bins,np.shape(self.hists[j])[-2],np.shape(self.hists[j])[-1]],\n dtype=np.int16)),axis=0)\n # update bin information\n self.hist_specs[j]['num_bins'] = self.hist_specs[j]['num_bins'] + new_num_bins\n self.hist_specs[j]['bin_edges'] = (np.array(range(self.hist_specs[j]['num_bins']+1))\n - 0.5) * self.hist_specs[j]['bin_width'] \n self.hist_specs[j]['bin_centers'] = (self.hist_specs[j]['bin_edges'][:-1] \n + 0.5 * np.diff(self.hist_specs[j]['bin_edges']))\n\n # TODO: Add check in case bins get \"full\" (i.e. 
approach int16 max value)\n # now put data into hist using apply_along_axis to perform histogram for each pixel\n print(\" Calculating and adding to pixel histograms\")\n self.hists[j][:,:,:] += np.apply_along_axis(lambda x: \n np.histogram(x,bins=self.hist_specs[j]['bin_edges'])[0],0,temp_data)\n\n return self", "def half_hour_ticker(*args):\n markets = fetch_markets()\n map(populate_half_hour_data, markets)\n return", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start, end Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n # Calculate the travel time per trip and add that column to data frame.\n df['Travel Time'] = df['End Time'] - df['Start Time']\n\n # extract month and day of week from Start Time to create new columns\n df['Start Hour'] = df['Start Time'].dt.hour\n df['End Hour'] = df['End Time'].dt.hour\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['hour'] = df['Start Time'].dt.hour\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n df['month'] = df['Start Time'].dt.month\n df['day'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n \n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n df = df[df['month'] == month]\n\n if day != 'all':\n df = df[df['day'] == day.title()]\n\n return df", "def load_data(city, month, week_day):\n# name for day variable changed from \"day_name\" into week_day to take into account new pandas method \".day_name()\"\n# read in file form selected city\n df = pd.read_csv(CITY_DATA[city])\n# create additional columns for months, days, start/ end times, hours and station combinations\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n df['month_start'] = df['Start Time'].dt.month\n 
df['month_end'] = df['End Time'].dt.month\n df['day_start'] = df['Start Time'].dt.day_name()\n df['day_end'] = df['End Time'].dt.day_name()\n df['hour'] = df['Start Time'].dt.hour\n df['station_comb'] = df['Start Station'] + ' &AND& ' + df['End Station']\n# filter data file by month: capture start and end months\n if month != 7:\n df1 = df[df['month_start'] == month]\n df2 = df1.append(df[df['month_end'] == month])\n df = df2.drop_duplicates()\n# filter data file by day: capture start and end days\n if week_day != 'All':\n df3 = df[df['day_start'] == week_day]\n df4 = df3.append(df[df['day_end'] == week_day])\n df = df4.drop_duplicates()\n# reset index to facilitate looping in station_stats function\n df = df.reset_index()\n# check if user wants to check first data lines\n req_check_df = input('\\nIf you want to check the selected data please enter y.')\n if req_check_df[0:1].lower() == 'y':\n print('check df = \\n', df.head())\n wait = input('Press Enter to continue. ')\n\n return df", "def process_raw_data(self):\n \n # Define some variables of interest.\n vor = [\"n_sentences\", \"n_correct\", \"p_correct\", \"median_RT\", \\\n \"mean_RT\", \"stdev_RT\", \"scaled_stdev_RT\"]\n \n # Get all participant names, or return straight away if no data was\n # loaded yet.\n if hasattr(self, \"raw\"):\n participants = self.raw.keys()\n participants.sort()\n else:\n self.data = None\n return\n\n # Count the number of participants.\n n = len(participants)\n \n # Create a data dict for each variable of interest.\n self.data = {}\n self.data[\"ppname\"] = []\n for var in vor:\n self.data[var] = numpy.zeros(n, dtype=float) * numpy.NaN\n \n # Loop through all participants.\n for i, ppname in enumerate(participants):\n # Add the participant name.\n self.data[\"ppname\"].append(copy.deepcopy(ppname))\n # Skip empty datasets.\n if self.raw[ppname] is None:\n continue\n # Compute stuff relevant to this task.\n self.data[\"n_sentences\"][i] = len(self.raw[ppname][\"Sentence\"])\n self.data[\"n_correct\"][i] = numpy.sum(self.raw[ppname][\"correct\"])\n self.data[\"p_correct\"][i] = float(self.data[\"n_correct\"][i]) \\\n / float(self.data[\"n_sentences\"][i])\n self.data[\"median_RT\"][i] = numpy.nanmedian(self.raw[ppname][\"RT\"])\n self.data[\"mean_RT\"][i] = numpy.nanmean(self.raw[ppname][\"RT\"])\n self.data[\"stdev_RT\"][i] = numpy.nanstd(self.raw[ppname][\"RT\"])\n # Compute a scaled standard deviation of the response time, scaled to the\n # median response time to remove the correlation between the two.\n self.data[\"scaled_stdev_RT\"] = self.data[\"stdev_RT\"] / self.data[\"median_RT\"]", "def data(self):\n if self._data.empty:\n self._data = super().data\n\n max_data_year = self._data[\"year\"].max()\n\n # If the date range of the data doesn't line up with the year filters\n # for train/test data, we risk getting empty data sets\n if self.max_year != max_data_year:\n max_year_diff = pd.to_timedelta(\n [(YEAR_IN_DAYS * (self.max_year - max_data_year))]\n * len(self._data),\n unit=\"days\",\n )\n\n self._data.loc[:, \"date\"] = self._data[\"date\"] + max_year_diff\n self._data.loc[:, \"year\"] = self._data[\"date\"].dt.year\n self._data.set_index(\n [\"team\", \"year\", \"round_number\"], drop=False, inplace=True\n )\n\n return self._data", "def _task_data(self):\n output = {\n 'all': [],\n 'open': [],\n 'open_hours': 0,\n 'done': [],\n 'done_hours': 0,\n 'week_done': [],\n 'week_done_hours': 0,\n 'week_due': [],\n 'week_due_hours': 0,\n 'velocity': [],\n 'velocity_hours': 0,\n 'velocity_count': 0,\n 
}\n\n last_sunday = SUNDAY - timedelta(weeks=1)\n three_weeks_ago = MONDAY - timedelta(weeks=4)\n\n tasks = Task.originals.owner_id(self.pk).order_by('due_dt')\n for t in tasks:\n output['all'].append(t)\n # process open tasks\n if not t.completed:\n output['open'].append(t)\n output['open_hours'] += t.task_time\n\n # Process done tasks\n else:\n output['done'].append(t)\n output['done_hours'] += t.task_time\n if t.completed_dt >= three_weeks_ago and t.completed_dt <= last_sunday:\n output['velocity'].append(t)\n output['velocity_hours'] += t.task_time\n\n if t.due_dt >= MONDAY and t.due_dt <= SUNDAY:\n output['week_due'].append(t)\n output['week_due_hours'] += t.task_time\n\n if t.completed and t.completed_dt >= MONDAY and t.completed_dt <= SUNDAY:\n output['week_done'].append(t)\n output['week_done_hours'] += t.task_time\n\n output['all_hours'] = output['open_hours'] + output['done_hours']\n\n # Extra calcs for the velocity\n output['velocity_count'] = len(output['velocity'])\n\n if output['velocity_hours'] > 0:\n output['velocity_hours'] = round(output['velocity_hours']/3,2)\n if output['velocity_count'] > 0:\n output['velocity_count'] = round(Decimal(output['velocity_count'])/3,2)\n\n return output" ]
[ "0.6901085", "0.6247725", "0.6184876", "0.60731816", "0.6018868", "0.59925467", "0.59080493", "0.5836418", "0.581456", "0.57886934", "0.5735579", "0.5710588", "0.5708006", "0.56987065", "0.5696986", "0.5620522", "0.5603038", "0.55995053", "0.5593089", "0.5587772", "0.5583394", "0.5580247", "0.5562039", "0.5543191", "0.5533751", "0.55308527", "0.5529855", "0.5527259", "0.5524742", "0.550023", "0.5494539", "0.5494005", "0.5493558", "0.5489938", "0.5477355", "0.5449487", "0.5437104", "0.5430906", "0.54265726", "0.54158646", "0.54146904", "0.5409898", "0.54092234", "0.5409023", "0.5403214", "0.5393927", "0.5390392", "0.538013", "0.5377", "0.5367803", "0.53666025", "0.5354654", "0.5350708", "0.5345722", "0.53400487", "0.5323333", "0.5323333", "0.5321588", "0.530981", "0.5305685", "0.5299703", "0.5295153", "0.5289941", "0.52830607", "0.52748907", "0.5273096", "0.5245466", "0.5232653", "0.5232302", "0.5228809", "0.5223065", "0.521808", "0.52161026", "0.5206719", "0.52037096", "0.5190696", "0.51859176", "0.5183097", "0.51800257", "0.5176941", "0.5171839", "0.51704067", "0.51578164", "0.5155888", "0.5153843", "0.515237", "0.5150287", "0.51482594", "0.5147005", "0.513918", "0.5138616", "0.51377726", "0.513406", "0.5134006", "0.5130476", "0.5126571", "0.5123016", "0.5120318", "0.51186365", "0.5107738" ]
0.5502701
29
Get the datetimes from the excel file
def get_datetimes(file_name):
    csv_file = open(file_name, 'rb')
    file_content = csv.reader(csv_file)
    # ignore header
    file_content.next()
    datetimes = []
    for row in file_content:
        datetimes.append(row[0])
    csv_file.close()
    return datetimes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_dates(file,start,end):\r\n \r\n data = format_data(file)\r\n data = data.loc[start:end,:] \r\n dates = list(data.index)\r\n \r\n return dates", "def read_hours_from_worksheet(sheet_name):\n workbook_path = get_workbook_path()\n wb = openpyxl.load_workbook(workbook_path)\n ws = wb[sheet_name]\n\n # return a list of the datetime entries\n last_row = str(ws.max_row)\n dates_row = tuple(ws['A9':'A' + last_row])\n date_strings = []\n for row_of_cell_objs in dates_row:\n for date in row_of_cell_objs:\n date_strings.append(date.value)\n\n # return a list of the hours\n last_row = str(ws.max_row)\n hours_row = tuple(ws['B9':'B' + last_row])\n hours_list = []\n for row_of_cell_objs in hours_row:\n for hours in row_of_cell_objs:\n hours_list.append(hours.value)\n return date_strings, hours_list", "def get_events(path, sheet_index=0, key=\"SF\"):\r\n\r\n wb = xlrd.open_workbook(path)\r\n sheet = wb.sheet_by_index(sheet_index)\r\n events = []\r\n\r\n # Scan the excel file for all cells that contanin the key (\"SF\") and return them\r\n for i in range(sheet.nrows):\r\n for j in range(sheet.ncols):\r\n if (sheet.cell_value(i, j) == 'Date'):\r\n date_row = i\r\n if (sheet.cell_value(i, j) == key):\r\n events.append([sheet.cell_value(i, 0), str(parser.parse(sheet.cell_value(date_row, j)).date())])\r\n\r\n return events", "def get_raw_datetimes():\n raw_datetimes = []\n with open(RAW_DATETIMES_PATH, 'r') as f:\n for x in f.read().splitlines():\n try:\n raw_datetimes.append(datetime.datetime(year=int(x[1:5]), month=int(x[6:8]), day=int(x[9:11])))\n except ValueError:\n raw_datetimes.append('NA')\n return raw_datetimes", "def getTimeseries_from_file(self, path, file_type):\n self.path = path\n if not file_type or file_type.lower() not in ['csv', 'pickle']:\n raise ValueError('Either pickle or csv must be true.')\n elif file_type=='pickle':\n temp_dataframe = pd.read_pickle(self.path)\n else:\n temp_dataframe = pd.read_csv(self.path, index_col=0)\n self.dataframe = cleanDates(temp_dataframe.reset_index())\n self.dataframe.set_index(['DateTime'], inplace=True)\n self.sort_df()\n self.last_entry_date = self.dataframe.index.values[-1]\n return self.dataframe", "def date_list(self):\n if self._date_list is None or self._file_modified:\n with open(self.data_filepath, 'r', newline='') as reader:\n reader = csv.reader(reader)\n self._date_list = [DatePoint.unfreeze(date[0]) for date in reader]\n self._file_modified = False\n return self._date_list", "def dates(self):\n #{{{ function to return start and end times for a station\n return self.wfdates.keys()", "def get_exptimes( self ):\n return np.array([h['EXPTIME'] for h in self.headers])", "def list_dates(product):\n\n if product == 'analysis_assim':\n files = _list_files(product)\n dates = []\n for f in files:\n date = _date_from_filename(f)\n dates.append(date)\n dates = list(set(dates)) # Get unique dates\n else:\n template = (HS_DATA_EXPLORER_URI + 'files_explorer/get-folder-contents'\n '/?selection_path=%2Fprojects%2Fwater%2Fnwm%2Fdata%2F{0}'\n '%3Ffolder&query_type=filesystem')\n if 'long_range' in product:\n product = 'long_range'\n uri = template.format(product)\n response = urlopen(uri).read()\n dates = re.findall(r'\\>([0-9]+)\\<', response)\n return sorted(dates)", "def parse_data(filename):\n x, y = [], []\n with open(filename) as f:\n reader = csv.reader(f)\n for row in reader:\n x.append(datetime.strptime(row[1], DATE_FORMAT))\n y.append(row[0])\n\n return x, y", "def Dates(self):\n data = self.DictData()\n dates = [ row[ \"Date\"] for row 
in data ]\n return dates", "def _calc_date(time_lines) -> list:\n return time_lines[0].split()[0].strip(\"-\").split(\"-\")", "def get_arterial(file_path,category):\n book = xlrd.open_workbook(file_path)\n file_name = os.path.basename(file_path)\n year = str(20) + \"\".join([str(s) for s in file_name if s.isdigit()]) ## gets the year from filename\n Month = strptime(file_name[2:5],'%b').tm_mon ## gets month no\n mydate = datetime.date(int(year),Month, 1) ## first day of the month and year\n mydate_1 = mydate - datetime.timedelta(days=1) ## interested in last month of this year as data corresponds to last month and same year\n mydate_2 = mydate - datetime.timedelta(days=368) ## interested in last month of last year as data corresponds to last month and last year \n #monthid1 = str(mydate_1.strftime(\"%Y\")) + str(mydate_1.strftime(\"%m\")) ## 200706 for July 2007 file\n monthid2 = str(mydate_2.strftime(\"%Y\")) + str(mydate_2.strftime(\"%m\")) ## 200606 for July 2007 file\n try:\n if category.lower() == \"rural\":\n index = 3\n elif category.lower() == \"urban\":\n index = 4\n else:\n index = 5\n sheet = book.sheet_by_index(index)\n list_states = sheet.col_values(0)\n xstart = list_states.index('Connecticut')\n xend = list_states.index('TOTALS')\n #list1 = sheet.col_slice(colx= 8,start_rowx=xstart,end_rowx= xend - 1)\n #list1 = [w.value for w in list1]\n list2 = sheet.col_slice(colx= 9,start_rowx=xstart,end_rowx= xend - 1)\n list2 = [w.value for w in list2]\n list3 = sheet.col_slice(colx= 0,start_rowx=xstart,end_rowx= xend - 1)\n list3 = [w.value.lower() for w in list3] ## take lowercase for direct match later\n df = pd.concat([pd.DataFrame(list3),pd.DataFrame(list2)], axis = 1) # ,pd.DataFrame(list1)\n #col_name_1 = category + '_Arterial_' + monthid1\n col_name_2 = category + '_Arterial_' + monthid2\n df.columns = ['State', col_name_2 ] # col_name_1, \n df[col_name_2].replace('', np.nan, inplace=True) ## removes rows with blank records ( zonal categories)\n df['State'].replace('', np.nan, inplace=True)\n curr_monthid = str(mydate.strftime(\"%Y\")) + str(mydate.strftime(\"%m\")) ## 200707 for July 2007 file\n df['data_monthid'] = curr_monthid\n df.dropna(subset=[col_name_2], inplace=True)\n df.dropna(subset=['State'], inplace=True)\n df = df[~df.State.str.contains(\"subtotal\")] ### causes problems on joins, there in most files\n df = df[df.State != \"total\"] ## causes problems on joins, is there only in specific files\n df['State'] = df.State.str.strip() ## removes leading and lagging white spaces if any\n df2 = pd.melt(df,id_vars=['State','data_monthid'],var_name=['category'], value_name='Million_Vehicle_Miles')\n return df2\n except:\n print(\"error in file \",os.path.basename(file_path))", "def get_only_dates(filename):\n result = []\n with open(filename, \"r\") as file:\n for line in file.readlines():\n if \"-\" not in line:\n continue\n my_line = line.split(\" - \")\n date = my_line[0]\n day, month, year = date.split()\n day = day[:-2]\n if len(day) < 2:\n day = f'0{day}'\n month = get_month_number(month)\n result.append({\n \"date_original\": date,\n \"date_modified\": f\"{day}/{month}/{year}\"\n })\n return result", "def data(self):\n\n try:\n sheet = load_workbook(self.arquivo, read_only=True)\n act_sheet = sheet.active\n lines = act_sheet.rows\n if self.l1 != 0:\n lines = islice(lines, self.l1, None)\n data = []\n for line in lines:\n if isinstance(self.usecols, tuple):\n content = [line[value].value for value in self.usecols]\n else:\n content = [line[self.usecols].value]\n\n if 
content[0] is not None:\n data.append(content)\n\n except InvalidFileException:\n book = xlrd.open_workbook(self.arquivo)\n sheet = book.sheet_by_index(0)\n data = []\n for line in range(self.l1, sheet.nrows, 1):\n conteudo = [sheet.row(line)[value].value if isinstance(sheet.row(line)[value].value, float)\n else 0.0 for value in self.usecols]\n data.append(conteudo)\n\n return data", "def read_spectrograms_excel(filename):\n\n xl = pd.ExcelFile(filename)\n key = xl.sheet_names[0]\n df = pd.read_excel(xl, index_col=0)\n\n if df.index.dtype == pd.Timestamp:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d %H:%M:%S\")\n\n # Replace _ with \" \"\n key = \" \".join(key.split(\"_\"))\n\n return key, df", "def get_timestamps( self, raster_pos=None ):\n if raster_pos is None:\n headers = self.time_specific_headers\n else:\n headers = self.get_raster_pos_headers( raster_pos )\n \n return [to_epoch( from_Tformat( h['DATE_OBS'] ) ) for h in headers]", "def get_arterial(file_path,category):\n book = xlrd.open_workbook(file_path)\n file_name = os.path.basename(file_path)\n year = str(20) + \"\".join([str(s) for s in file_name if s.isdigit()]) ## gets the year from filename\n Month = strptime(file_name[2:5],'%b').tm_mon ## gets month no\n mydate = datetime.date(int(year),Month, 1) ## first day of the month and year\n #mydate_1 = mydate - datetime.timedelta(days=1) ## interested in last month of this year as data corresponds to last month and same year\n mydate_2 = mydate - datetime.timedelta(days=368) ## interested in last month of last year as data corresponds to last month and last year \n #monthid1 = str(mydate_1.strftime(\"%Y\")) + str(mydate_1.strftime(\"%m\")) ## 200706 for July 2007 file\n monthid2 = str(mydate_2.strftime(\"%Y\")) + str(mydate_2.strftime(\"%m\")) ## 200606 for July 2007 file\n try:\n if category.lower() == \"rural\":\n index = 3\n elif category.lower() == \"urban\":\n index = 4\n else:\n index = 5\n sheet = book.sheet_by_index(index)\n list_states = sheet.col_values(0)\n xstart = list_states.index('Connecticut')\n xend = list_states.index('TOTALS')\n #list1 = sheet.col_slice(colx= 6,start_rowx=xstart,end_rowx= xend - 1)\n #list1 = [w.value for w in list1]\n list2 = sheet.col_slice(colx= 7,start_rowx=xstart,end_rowx= xend - 1)\n list2 = [w.value for w in list2]\n list3 = sheet.col_slice(colx= 0,start_rowx=xstart,end_rowx= xend - 1)\n list3 = [w.value.lower() for w in list3] ## take lowercase for direct match later\n df = pd.concat([pd.DataFrame(list3),pd.DataFrame(list2)], axis = 1) # pd.DataFrame(list1),\n #col_name_1 = category + '_Arterial_' + monthid1\n col_name_2 = category + '_Arterial_' + monthid2\n df.columns = ['State', col_name_2 ] ## col_name_1,\n df[col_name_2].replace('', np.nan, inplace=True) ## removes rows with blank records ( zonal categories)\n df['State'].replace('', np.nan, inplace=True)\n curr_monthid = str(mydate.strftime(\"%Y\")) + str(mydate.strftime(\"%m\")) ## 200707 for July 2007 file\n df['data_monthid'] = curr_monthid\n df.dropna(subset=[col_name_2], inplace=True)\n df.dropna(subset=['State'], inplace=True)\n df = df[~df.State.str.contains(\"subtotal\")] ### causes problems on joins, there in most files\n df = df[df.State != \"total\"] ## causes problems on joins, is there only in specific files\n df['State'] = df.State.str.strip() ## removes leading and lagging white spaces if any\n df2 = pd.melt(df,id_vars=['State','data_monthid'],var_name=['category'], value_name='Million_Vehicle_Miles')\n return df2\n except:\n print(\"error in file 
\",os.path.basename(file_path))", "def parse_date(self) -> str:\r\n for line in self.lines:\r\n line = ''.join(line)\r\n if 'updated' in line:\r\n index = line.find('Last updated')\r\n if index != -1:\r\n substring = line[index + 10: index + 50].split('.')[0][-13:]\r\n print(substring)\r\n return pd.to_datetime(substring)\r\n if 'Scottish test n' in line:\r\n index_date = line.find('h test n')\r\n print(index_date)\r\n if index_date != -1:\r\n return pd.to_datetime(line[index_date+15:index_date+29])", "def read_times(self, slices=None):\n times = netCDF4.num2date(\n datetime.strptime(\n self.get_handler().SOURCE_START_DATE.split('.')[0],\n '%Y%m%d%H%M%S'\n )\n )\n return numpy.ma.array([times])", "def dates(self):\n pass", "def weather_reader_colab(filename, colab_files):\n data = pd.read_excel(io.BytesIO(colab_files[filename]), \n parse_dates=True, \n index_col='Time measured')\n return data", "def extract_dates(data):\n dates = []\n \n for line in data.splitlines():\n if line[6:8] == \"20\":\n dates.append(datetime.strptime(line[6:16], '%Y-%m-%d').date())\n \n return list(set(dates))\n pass", "def read_schedules(use, x):\n # read schedules from excel file\n occ = [x['Weekday_1'].values[:24], x['Saturday_1'].values[:24], x['Sunday_1'].values[:24]]\n el = [x['Weekday_2'].values[:24], x['Saturday_2'].values[:24], x['Sunday_2'].values[:24]]\n dhw = [x['Weekday_3'].values[:24], x['Saturday_3'].values[:24], x['Sunday_3'].values[:24]]\n month = x['month'].values[:12]\n\n if use == \"INDUSTRIAL\":\n pro = [x['Weekday_4'].values[:24], x['Saturday_4'].values[:24], x['Sunday_4'].values[:24]]\n else:\n pro = [np.zeros(24), np.zeros(24), np.zeros(24)]\n\n # read area per occupant\n area_per_occupant = x['density'].values[:1][0]\n\n return occ, el, dhw, pro, month, area_per_occupant", "def data_reader_colab(filename, colab_files):\n data = pd.read_excel(io.BytesIO(colab_files[filename]), \n parse_dates=True, index_col='Time', \n usecols=range(2))\n return data", "def FinConvExtractfromPickle(self, pickleLoc):\n df = pickle.load(open(pickleLoc, 'rb'))\n levels = df.dtypes.index.levels\n for l1 in levels[0]:\n for l2 in levels[1]:\n df[l1, l2, 'Date'] = df[l1,l2,'Date'].apply(ConvertExcelTime, convert_dtype=True)\n convertedLoc = os.path.splitext(pickleLoc)[0] + '_convertedDate.p'\n pickle.dump(df, open(convertedLoc, 'wb'))\n return convertedLoc", "def get_dates(raw_table) -> \"list of dates\":\n dates = []\n found_first = False\n for i, dstr in enumerate([raw_table[i][0] for i in range(0, len(raw_table))]):\n if dstr:\n if len(dstr.split(\"/\")) == 3:\n d = datetime.datetime.strptime(dstr, '%m/%d/%Y')\n elif len(dstr.split(\"-\")) == 3:\n d = datetime.datetime.strptime(dstr, '%Y-%m-%d')\n else:\n # Not necessarily an error, could just be a non-date cell\n logging.debug(\"unknown date-format: {}\".format(dstr))\n continue\n dates.append(d)\n if not found_first:\n found_first = True\n logging.debug(\"Found first date: '{}' at i: {}\".format(d.isoformat(), i))\n elif found_first:\n logging.debug(\"Last date: {}\".format(d))\n break\n return dates", "def _get_case_dates(self):\n path = \"//path/to/text/text()\"\n return [\n convert_date_string(date_string)\n for date_string in self.html.xpath(path)\n ]", "def _load_time_series(self, path: str) -> np.ndarray:\n items = []\n previous = None\n for item in sorted(pathlib.Path(path).glob(\"*.nc\")):\n with xr.open_dataset(item) as ds:\n current = ds.ocean_time.values[0].astype(\"datetime64[M]\")\n if (previous is not None\n and (current - previous != 
np.timedelta64(1, \"M\"))):\n raise ValueError(\"Time series not continuous\")\n items.append((current, str(item)))\n previous = current\n length = max(len(item[1]) for item in items)\n return np.array(\n items,\n dtype={\n \"names\": (\"date\", \"path\"),\n \"formats\": (\"datetime64[M]\", f\"U{length}\"),\n },\n )", "def as_data(self):\n wb = load_workbook(filename=self.file_path)\n ws = wb.active\n data = []\n\n for i, row in enumerate(ws.rows):\n if len(row) != self.max_columns:\n print(len(row))\n return []\n\n if i == 0:\n # skip file header\n continue\n\n try:\n d = self.get_row_dict(row)\n except Exception:\n return []\n\n data.append(d)\n\n return data", "def get_dates_list() -> List[str]:\n dates = listdir(\"hansard_gathering/processed_hansard_data\")\n return sorted([_file for _file in dates if not _file.endswith(\"_num\")])", "def read_data(path):\n df = pd.read_excel(path, 0)\n filtered_df = df.replace(np.nan, '-', regex=True)\n data = (filtered_df['Resumen'].tolist())\n return data", "def get_info():\r\n\r\n path = \"data.xlsx\" # change path depending on the name and location of the file\r\n xl_book = xlrd.open_workbook(path)\r\n xl_sheet = xl_book.sheet_by_index(0) # selects the first sheet in the spreadsheet\r\n emails = xl_sheet.col_values(1, 1) # emails are in second column\r\n names = xl_sheet.col_values(0, 1) # client names are in first column\r\n return emails, names", "def read_datetime_set(filename, seq_len):\n\tdate_set = []\n\twith open(os.path.join(info_path, \"squence_len_{}\".format(seq_len), filename), 'r') as f:\n\t\tfor line in f:\n\t\t\tnew_line = line.rstrip('\\n').split('\\t')\n\t\t\tdate_set.append([int(new_line[0]), int(new_line[1])])\n\treturn np.array(date_set)", "def readfile(self,filenm,delimiter):\n\n timefiledata = []\n timefileextra = {}\n fixtimeflag = False\n hour0=hour1=False\n\n\n if isinstance(filenm,(str,unicode)):\n timeobject = readTextFile(filenm,delimiter)\n else:\n timeobject = filenm\n\n\n for idx,lines in enumerate(timeobject):\n\n try:\n floatlines = map(float,lines)\n month,date,hour = floatlines[:3]\n dataval = floatlines[3:]\n ##Code to check if the first time value is 1.0. If so subtract every hour by 0.5\n\n if not hour1 or hour0:\n hour1 = hour\n if hour0==1.0 and hour1==2.0:\n #Go back to the array that stores timevalues and fix the hour and time stamp\n timefiledata[0]['h']=0.5\n timefiledata[0]['tstamp']=timeStamp(1,1,0.5)\n fixtimeflag=True\n\n if not hour0:\n hour0 = hour\n\n if fixtimeflag:\n hour = hour-0.5\n ##Code to check if the first time value is 1.0. 
If so subtract every hour by 0.5\n\n timestamp = timeStamp(month,date,hour)\n\n timefiledata.append({\"m\":month,\"d\":date,\"h\":hour,\"readStadicData\":dataval,\"tstamp\":timestamp})\n\n except ValueError:\n print(sys.exc_info())\n timefileextra[lines[0]]=lines[1]\n\n return (timefiledata,timefileextra)", "def _get_dates():\n remote = os.path.join(BASE_URL, RSS_FEED)\n local = os.path.join(TMP, RSS_FEED)\n u..(remote, local)\n\n with open(local) as f:\n return PUB_DATE.findall(f.read())", "def get_info(info_filename):\n with open(info_filename) as info_file:\n info_dict = csv.DictReader(info_file)\n info = {}\n for row in info_dict:\n info[row['path']] = datetime.datetime.strptime(row['start'],\n '%Y-%m-%d')\n return info", "def read_timesheet(path, date_format=DATE_FORMAT, input_time_units='h'):\n \n f = pd.read_csv(path, parse_dates=['date'], \n date_parser=lambda x: parse_date(x, DATE_FORMAT))\n f = f.sort_values('date')\n # Convert to hours\n convert_to_hours = build_convert_to_hours(input_time_units)\n f['duration'] = f['duration'].map(convert_to_hours)\n return f", "def ParseFile(self):\r\n ### ################\r\n imp = PrayerTimeExcelParser();\r\n\r\n # Import the timetable in the correct year\r\n # We can't import into the past\r\n now = datetime.datetime.now();\r\n curr_month = now.month;\r\n curr_year = now.year;\r\n if self.month < curr_month:\r\n imp.year = curr_year + 1;\r\n\r\n imp.month = self.month;\r\n imp.year = curr_year;\r\n\r\n try:\r\n mosque = Mosque.objects.get(id=self.mosque_id);\r\n except Exception as e:\r\n return False, \"No mosque was found with the given ID {}\".format(self.mosque_id);\r\n\r\n try:\r\n prayer_times = imp.ImportFromFile(self.file_path);\r\n except ParsingError as e:\r\n return False, \"Parsing Error: {}\".format(str(e));\r\n\r\n for idx, pt in enumerate(prayer_times):\r\n pt_model = PrayerTimes(mosque=mosque,\r\n date=datetime.date(imp.year, imp.month, idx+1),\r\n fajr_jamaa=pt[FajrPrayerName.name],\r\n duhr_jamaa=pt[DuhrPrayerName.name],\r\n asr_jamaa=pt[AsrPrayerName.name],\r\n maghrib_jamaa=pt[MaghribPrayerName.name],\r\n ishaa_jamaa=pt[IshaPrayerName.name]);\r\n pt_model.save();\r\n\r\n return True, \"Added {} days from the timetable\".format(len(prayer_times));", "def read_zip(filename='result.zip'):\n print('Reading data...')\n df = pd.read_csv(filename, compression='zip', low_memory=False)\n df['Date'] = pd.to_datetime(df['Date'], format='%d/%m/%Y')\n df['Time'] = pd.to_datetime(df['Time'], format='%H:%M')\n return df", "def ReadMetrics( fileName ):\n DataDF=pd.read_csv(fileName,header=0,delimiter=',',parse_dates=[0])\n DataDF = DataDF.set_index('Date')\n #print(DataDF.head())\n return( DataDF )", "def read_meteo_day(filename=None):\n if not filename:\n filename = settings.METEO_DAY_FILENAME\n return pd.read_csv(filename, sep=';', parse_dates=[0])", "def get_seviri_file_time(file):\n if hasattr(file, '__iter__'):\n filenames = [f.split('/')[-1] for f in file]\n date = [datetime(int(f[38:42]), int(f[42:44]),\n int(f[44:46]), int(f[46:48]),\n int(f[48:50])) for f in filenames]\n else:\n f = file.split('/')[-1]\n date = datetime(int(f[38:42]), int(f[42:44]),\n int(f[44:46]), int(f[46:48]),\n int(f[48:50]))\n return date", "def fileReader(pathtofile, dateheading, dtformat='%m/%d/%Y %H:%M', offset=0):\n # Read the files\n dlist = []\n if pathtofile.endswith('.csv'):\n dfr = read_csv(pathtofile)\n if pathtofile.endswith('.xlsx'):\n dfr = read_excel(pathtofile)\n else:\n dfr = read_pickle(pathtofile)\n\n # Parsing the Date column\n 
dfr.insert(loc=0, column='Dates',\n value=to_datetime(dfr[dateheading],\n format=dtformat) + DateOffset(hours=offset))\n\n dfr.drop(dateheading, axis=1, inplace=True) # Drop original Time column\n\n # Add df to the dlist\n dlist.append(dfr)\n\n return dlist", "def test_dates(self):\n result = export.processExport(houseId=1,\n startDate = datetime.datetime(2013, 01, 06) #5 Days\n )\n\n self.assertEqual(result.shape, (1440, 2))\n self.assertEqual(result.index[0], datetime.datetime(2013, 01, 06))\n self.assertEqual(result.index[-1], datetime.datetime(2013, 01, 10, 23, 55))\n\n\n #Stop at 00:00 on the 5th\n result = export.processExport(houseId=1,\n endDate = datetime.datetime(2013, 01, 05, 23, 55) #5 Days\n )\n\n self.assertEqual(result.shape, (1440, 2))\n self.assertEqual(result.index[0], datetime.datetime(2013, 01, 01))\n self.assertEqual(result.index[-1], datetime.datetime(2013, 01, 05, 23, 55))", "def parse_daily_dates(self, x, pattern, ext, rename=None,\n single_mode=False):\n datestring = self.scrub_string(x, pattern, ext, rename=rename)\n # Check if it's a monthly file or a daily one\n if len(datestring) == 7:\n date = datetime.datetime.strptime(datestring, '%Y%m')\n dates = [datetime.datetime(date.year, date.month, x) for x in\n range(1, calendar.monthrange(date.year, date.month)[1] +1)]\n return dates\n\n elif len(datestring) == 8:\n if single_mode:\n return datetime.datetime.strptime(datestring, '%Y%m%d')\n\n return [datetime.datetime.strptime(datestring, '%Y%m%d')]\n\n else:\n return None", "def read_stats_excel(filename):\n\n df_dict = {}\n xl = pd.ExcelFile(filename)\n\n for sh in xl.sheet_names:\n df = pd.read_excel(xl, sheet_name=sh, header=[0, 1, 2])\n\n # Use start date as index\n if df[\"End\"].dtypes.all() == pd.Timestamp:\n if \"File Number\" in df.columns:\n df = df.drop(\"File Number\", axis=1, level=0)\n df = df.drop(\"End\", axis=1, level=0)\n df = df.set_index(df.columns[0])\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d %H:%M:%S\")\n df.index.name = \"Date\"\n # Use file number as index\n else:\n df = df.drop([\"Start\", \"End\"], axis=1, level=0)\n df = df.set_index(df.columns[0])\n df.index.name = \"File Number\"\n\n df.columns.rename([\"channels\", \"stats\", \"units\"], inplace=True)\n df_dict[sh] = df\n\n return df_dict", "def load_timestamps(data_path):\n timestamp_file = os.path.join(data_path, 'oxts', 'timestamps.txt')\n\n # Read and parse the timestamps\n timestamps = []\n with open(timestamp_file, 'r') as f:\n for line in f.readlines():\n # NB: datetime only supports microseconds, but KITTI timestamps\n # give nanoseconds, so need to truncate last 4 characters to\n # get rid of \\n (counts as 1) and extra 3 digits\n t = datetime.datetime.strptime(line[:-4], '%Y-%m-%d %H:%M:%S.%f')\n timestamps.append(t)\n return timestamps", "def get_dates(self):\r\n return self.__dates", "def __get_times(self):\n data = self.simulate_file.readlines()\n data = list(map(str.strip, data))\n data = list(map(float, data))\n start = data[0]\n times = data[1:]\n return (start, times)", "def getDate(self):\n arr=[]\n dayarr=['Saturday','Sunday','Monday','Tuesday','Wednesday','Thursday','Friday']\n if self.debug =='yes':\n arr=self.debugdate\n else:\n \n hour = time.strftime('%H')\n month=time.strftime('%m')\n dayname=time.strftime('%A')\n monthname=time.strftime('%B')\n \n if time.strftime('%H') < '16':\n dayname=dayarr[int(time.strftime('%w'))]\n day = '%s' % str(int(time.strftime('%d'))-1)\n #print day\n if day == '0':\n month,day,monthname=self.EOM(month)\n if int(day) 
< 10:\n day= '0%s' % day\n else:\n day=day\n else:\n day = time.strftime('%d')\n \n arr.append(month)\n arr.append(day)\n arr.append(time.strftime('%Y'))\n arr.append(dayname)\n arr.append(monthname)\n #print arr\n return arr", "def read_spreadsheet():\n txtlines = open(DATA_FILEPATH).readlines()\n rows = list(csv.DictReader(txtlines))\n return rows", "def import_data(path, delimiter=' ', date_time_fmt='%Y-%m-%d %H:%M:%S', percent_fmt=less_than_ten_is_five):\n time_stamps = []\n percent = []\n\n with open(path) as data_file:\n reader = csv.reader(data_file, delimiter=delimiter)\n for row in reader:\n if len(row) > 2:\n date_time_str = ' '.join(row[:2]).split('.')[0] # ignore milliseconds\n percent_str = ' '.join(row[2:])\n \n time_stamps.append(dt.datetime.strptime(date_time_str, date_time_fmt))\n percent.append(percent_fmt(percent_str))\n \n return time_stamps, percent", "def get_start_end_times(frequency, fname):\n\n\n if fname.endswith('.nc'):\n\n ncfile = os.path.basename(fname)\n timestamp = ncfile.strip('.nc').split('_')[-1]\n\n # IF timestamp is of the form YYYYMMDDHHMM-YYYYMMDDHHMM\n if len(timestamp) == 25:\n start_time = datetime.date(int(timestamp[:4]), int(timestamp[4:6]), int(timestamp[7:8]))\n end_time = datetime.date(int(timestamp[-12:-8]), int(timestamp[-8:-6]), int(timestamp[-6:-4]))\n\n # IF timestamp is of the form YYYYMMDDHH-YYYYMMDDHH\n if len(timestamp) == 21:\n start_time = datetime.date(int(timestamp[:4]), int(timestamp[4:6]), int(timestamp[7:8]))\n end_time = datetime.date(int(timestamp[-10:-6]), int(timestamp[-6:-4]), int(timestamp[-4:-2]))\n\n if frequency == 'mon':\n start_time = datetime.date(int(fname[-16:-12]), int(fname[-12:-10]), 01)\n end_mon = fname[-5:-3]\n if end_mon == '02':\n end_day = 28\n elif end_mon in ['04', '06', '09', '11']:\n end_day = 30\n else:\n end_day = 31\n end_time = datetime.date(int(fname[-9:-5]), int(fname[-5:-3]), end_day)\n\n if frequency == 'day':\n start_time = datetime.date(int(fname[-20:-16]), int(fname[-16:-14]), int(fname[-14:-12]))\n end_time = datetime.date(int(fname[-11:-7]), int(fname[-7:-5]), int(fname[-5:-3]))\n else:\n start_time = datetime.date(1900, 1, 1)\n end_time = datetime.date(1999, 12, 31)\n\n return start_time, end_time", "def get_file_date(self, file: str) -> date:", "def birthday_wisher(xlsx_file):\n df = pd.read_excel(xlsx_file) \n hour = int(datetime.datetime.now().strftime('%H'))\n minute = int(datetime.datetime.now().strftime('%M'))\n today = datetime.datetime.now().strftime('%d-%m') \n yearNow = datetime.datetime.now().strftime('%Y') \n writeInd = [] \n for index,item in df.iterrows():\n bday = item['Birthday'].strftime('%d-%m')\n if bday==today and yearNow not in str(item['Year']):\n writeInd.append(index)\n pywhatkit.sendwhatmsg(f'+{item[\"Mobile No.\"]}',f'🎂🎂Happy Birthday {item[\"Name\"]}🎂🎂',hour,minute+1,wait_time=30)\n for i in writeInd:\n yr = df.loc[i,'Year']\n df.loc[i,'Year'] = str(yr)+','+str(yearNow)\n df.to_excel(xlsx_file,index=False)", "def timeStamps(dataset):\n \n timestamps = []\n \n for index, row in enumerate(dataset):\n try:\n timeObj = datetime.datetime.strptime(timeStampFix(row), '%y:%j:%H:%M:%S')\n except ValueError:\n print('Failed to create datetime object for ' + timeStampFix(row))\n timestamps.append(timeObj)\n \n return timestamps", "def get_dates(self, sr_df):\n return [\n date_obj.strftime(self.DATE_FORMAT) for date_obj in sr_df.index\n ]", "def get_events(filename):\n\tdf = pd.read_csv(filename)\n\t#get date from first entry (row) of DateUTC column\n\tdf['date'] = 
df['DateUTC<br />'][0].split(' ')[0]\n\t\n\t\n\t#drop the following columns\n\tdropLabels = ['FullMetar', 'DateUTC<br />', \\\n\t'Wind Direction','Gust SpeedMPH', \\\n\t'WindDirDegrees', 'Sea Level PressureIn', 'Dew PointF', \\\n\t'TemperatureF', 'Humidity','VisibilityMPH', \\\n 'Wind SpeedMPH', 'PrecipitationIn']\n\n\tdf.drop(labels=dropLabels,axis=1,inplace=True)\n\t\n\t#add hour column\n\ttimeLabel = df.columns.values[0] \n\tdf['Hour'] = pd.to_datetime(df[timeLabel]).dt.hour\n\t#drop timelabel column since we don't use anything beyond hour\n\tdf.drop(labels=timeLabel,axis=1,inplace=True)\n\n\treturn df", "def read_elia_cap(filename):\r\n df = pd.read_excel(filename,skiprows=0,parse_dates=False)\r\n \r\n #standaard datetime vorm omzetten\r\n df[\"Tendering Periodneww\"] = pd.to_datetime(df[\"Tendering Period\"],errors='coerce')\r\n df[\"Delivery Periodneww\"] = pd.to_datetime(df[\"Delivery Period\"],errors='coerce')\r\n \r\n #de 'weekvorm' van datetime omzetten\r\n df[\"Tendering Period\"]=df[\"Tendering Period\"].astype(str).map(lambda x: x.lstrip('W'))\r\n df[\"Delivery Period\"]=df[\"Delivery Period\"].astype(str).map(lambda x: x.lstrip('W'))\r\n df[\"Tendering Period\"]=df[\"Tendering Period\"].replace('\\s+', '_',regex=True)\r\n df[\"Delivery Period\"]=df[\"Delivery Period\"].replace('\\s+', '_',regex=True)\r\n df[\"Tendering Periodnew\"] = pd.to_datetime(df[\"Tendering Period\"][df[\"Delivery Periodneww\"].isnull()].astype(str).add('-1'), format=\"%W_%Y-%w\", errors='coerce')\r\n df[\"Delivery Periodnew\"] = pd.to_datetime(df[\"Delivery Period\"][df[\"Delivery Periodneww\"].isnull()].astype(str).add('-1'), format=\"%W_%Y-%w\", errors='coerce')\r\n \r\n #datumkolommen samenvoegen en overbodige kolommen verwijderen\r\n df[\"Tendering Period Combined\"] = df['Tendering Periodnew'].fillna(df['Tendering Periodneww'])\r\n df[\"Delivery Period Combined\"] = df['Delivery Periodnew'].fillna(df['Delivery Periodneww'])\r\n df=df.drop(columns=['Tendering Period', 'Delivery Period','Tendering Periodneww', 'Delivery Periodneww','Tendering Periodnew', 'Delivery Periodnew',])\r\n df.columns=[\"duration\",\"reserve type\",\"service type\",\"total contracted volume in MW\", \"average price in euro/MW/h\",\"forecasted average price in euro/MW/h\",\"total offered volume in MW\",\"tariff period\", \"symmetry type\", \"country\",\"tendering period\",\"delivery period\"] \r\n \r\n #multi index & sort\r\n df=df.set_index([\"delivery period\"])\r\n df=df.sort_index(ascending=False);\r\n \r\n return df", "def DATE_TO_XL(date_value):\n # If date_value is `naive` it's ok to pass tz to both DTIME as it won't affect the\n # result.\n return (DTIME(date_value) - DTIME(_excel_date_zero)).total_seconds() / 86400.", "def get_retired():\n retired = []\n cell_list = sheet.findall(\"RETIRE\", in_column=4)\n\n for i in cell_list:\n item = sheet.cell(i.row, i.col - 1).value\n retired.append(str(item))\n\n return retired", "def _csv_download(page):\n # gc = gspread.login(page.timetable.google_user, page.timetable.google_passwd)\n gc = googleoauth.authenticate_google_docs()\n csv_file = gc.open('WebValley2019')\n\n # gsession = gss.Client(page.timetable.google_user, page.timetable.google_passwd)\n # ss = gss.Spreadsheet(page.timetable.spreadsheet)\n # csv_file = gsession.download(ss, gid=page.timetable.spreadsheet_gid)\n # read = csv_file.read()\n read = csv_file.worksheet('TIMETABLE').get_all_values()\n # print \"csv\", read\n return read", "def get_timeseries():\n\n # generate the result files\n name = 
os.path.join(project.output_folder, project.scenario+'.sww')\n log.debug('get_timeseries: input SWW file=%s' % name)\n log.debug('get_timeseries: gauge file=%s' % project.gauge_file)\n anuga.sww2csv_gauges(name, project.gauge_file, quantities=project.layers_list,\n verbose=False)\n\n # since ANUGA code doesn't return a list of generated files,\n # look in output directory for 'gauge_*.csv' files.\n glob_mask = os.path.join(project.output_folder, 'gauge_*.csv')\n return glob.glob(glob_mask)", "def get_times(traj_num_str):\n times = []\n # Get timestamps of sequence\n times_file_path = \"./dataset/\" + traj_num_str.zfill(2) + \"/times.txt\"\n with open(times_file_path, \"r\") as fid:\n for i, line in enumerate(fid):\n times.append(float(line))\n return times", "def load_time(self) -> Tuple[np.ndarray, np.ndarray]:\n # filename = self.casedir / Path(\"times.txt\")\n filename = self.casedir\n assert filename.exists(), \"Cannot find {filename}\".format(filename=filename)\n return load_times(filename)", "def parse_data_from_file(path):\n print(path.stem)\n \n raw = path.stem.split('-')\n\n rawdate = raw[0][2:]\n print(rawdate)\n date = rawdate[6:] + \"/\" + rawdate[4:6] + '/' + rawdate[0:4]\n rawtime = raw[1]\n time = rawtime[0:2] + \"h\" + rawtime[2:4] + \"m\" + rawtime[4:6] + \"s\"\n dt = datetime.strptime(rawdate+rawtime, '%Y%m%d%H%M%S')\n print(dt)\n return dt", "def read_stops_data(path):\n stops = pd.read_excel(\n path,\n skiprows=[1, 2, 3],\n dtype={c: STOPS_COLUMNS[c][1] for c in STOPS_COLUMNS.keys()},\n )\n # Rename\n stops = stops.rename(\n mapper={c: STOPS_COLUMNS[c][0] for c in STOPS_COLUMNS.keys()}, axis=1\n )\n # Drop empty rows\n stops = stops[stops[\"stop_id\"].notnull()]\n # Set index to stop_id\n stops = stops.set_index(\"stop_id\")\n # Parse Swiss coordinates\n stops[\"lv95_e_coord\"] = stops[\"lv95_e_coord\"].str.replace(\",\", \"\").astype(\"Int64\")\n stops[\"lv95_n_coord\"] = stops[\"lv95_n_coord\"].str.replace(\",\", \"\").astype(\"Int64\")\n # Convert them to latitude & longitude\n transformer = Transformer.from_crs(\"epsg:2056\", \"epsg:4326\")\n stops[\"latitude\"] = np.array(\n [\n transformer.transform(stop[\"lv95_e_coord\"], stop[\"lv95_n_coord\"])[0]\n for _, stop in tqdm(\n stops.iterrows(), total=stops.shape[0], desc=\"Computing latitude \"\n )\n ]\n )\n stops[\"longitude\"] = np.array(\n [\n transformer.transform(stop[\"lv95_e_coord\"], stop[\"lv95_n_coord\"])[1]\n for _, stop in tqdm(\n stops.iterrows(), total=stops.shape[0], desc=\"Computing longitude \"\n )\n ]\n )\n return stops", "def read(self):\n with open(self) as f:\n return Timestamp.load(f)", "def get_obsdate():\n\n#\n#--- read sot data\n#\n f = open(sot_directory, 'r')\n data = [line.strip() for line in f.readlines()]\n f.close()\n\n obsid_list = []\n start_date = []\n index_date = []\n for ent in data:\n temp = re.split('\\^', ent)\n obsid = temp[1]\n#\n#--- check the data are valid\n#\n try:\n atemp = re.split('\\s+', temp[13])\n mon = atemp[0]\n date = atemp[1]\n year = atemp[2][2] + atemp[2][3]\n except:\n continue\n#\n#--- convert month in letter into digit\n#\n for i in range(0, 12):\n if mon == month_list[i]:\n mon = i + 1\n break\n#\n#--- two forms of starting date: 05/23/14 and 20140523\n#\n lmon = str(mon)\n if int(mon) < 10:\n lmon = '0' + lmon\n ldate = str(date)\n if int(date) < 10:\n ldate = '0' + ldate\n\n dline = lmon + '/' + ldate + '/' + year\n iline = atemp[2] + lmon + ldate\n\n obsid_list.append(int(obsid))\n start_date.append(dline)\n index_date.append(iline)\n\n return 
(obsid_list, start_date, index_date)", "def read_data(self, filepath, agg_type):\n\n data = pd.read_excel(filepath)\n data['date'] = data.activity_date.apply(lambda x: x.date())\n data['year'] = data.date.apply(lambda x: x.year)\n data['month'] = data.date.apply(lambda x: x.month)\n data['week'] = data.date.apply(lambda x: x.strftime('%U'))\n data['dom'] = data.date.apply(lambda x: x.day)\n data['day'] = data.date.apply(lambda x: x.isoweekday())\n data['last_dom'] = data.date.apply(lambda x: calendar.monthrange(x.year, x.month)[1])\n data['is_last_dom'] = np.where(data.dom == data.last_dom, 1, 0)\n data['quarter'] = data.month.apply(lambda x: (x-1) // 3 + 1)\n data['is_last_day_of_quarter'] = np.where(((data.month == 3) & (data.dom == 31)) | ((data.month == 6) & (data.dom == 30)) |\n ((data.month == 9) & (data.dom == 30)) | ((data.month == 12) & (data.dom == 31)), 1, 0)\n return data", "def lectxl(NOM):\n #NOM=input(\"nom du fichier:\")#interactif\n #NOM=str(NOM +\".xlsx\")\n workbook = xlrd.open_workbook(NOM)\n SheetNameList = workbook.sheet_names()\n worksheet = workbook.sheet_by_name(SheetNameList[0])\n num_rows = worksheet.nrows \n f=[NOM]\n for curr_row in range(0,num_rows):\n row = worksheet.row(curr_row)\n f.append(row)\n return f", "def extract_datetime(fpath):\n try:\n handle = open(fpath, 'rb')\n if hexlify(handle.read(2)) != hexlify(u'MZ'):\n handle.close()\n return\n except:\n return\n\n try:\n handle.seek(60, 0)\n offset = handle.read(4)\n offset = hexlify(offset[::-1])\n\n if offset == '':\n handle.close()\n return\n\n offset = int(offset, 16)\n handle.seek(offset+8, 0)\n dword = handle.read(4)\n handle.close()\n\n t = unpack(\">L\", dword[::-1])[0]\n except:\n return\n return datetime.datetime.fromtimestamp(t)", "def normalize_excelSheet(self, sheetname, conversion_dictionary):\n\n sheet = self.wb.sheet_by_name(sheetname)\n\n ami_data = []\n\n date_headers = [\"bibliographic.date\", \"technical.dateCreated\"]\n time_headers = [\"technical.durationHuman\"]\n\n #copy everything from the 3rd row to the last row with a filename\n for rownum in range(2, sheet.nrows):\n if sheet.cell(rownum, 0):\n ami_data.append(sheet.row_values(rownum))\n\n for i in range(0, sheet.ncols):\n #normalize header\n header_entry = self.get_headerEntryAsString(sheetname, i)\n ami_data[0][i] = self.normalize_headerEntry(\n header_entry,\n conversion_dictionary)\n\n #convert excel dates\n if ami_data[0][i] in date_headers:\n for j in range(3, sheet.nrows):\n if sheet.cell(j, i).ctype == 3:\n value = sheet.cell(j, i).value\n ami_data[j-2][i] = self.convert_excelDateTime(value, \"date\")\n\n #convert excel times\n if ami_data[0][i] in time_headers:\n for j in range(3, sheet.nrows):\n if sheet.cell(j, i).ctype == 3:\n value = sheet.cell(j, i).value\n ami_data[j-2][i] = self.convert_excelDateTime(value, \"time\")\n\n ami_df = self.normalize_values(ami_data)\n\n return ami_df", "def load_transaction_time(t_start_date, n_range, col_names):\n\tprint(\"Loading transaction data...\")\n\tfile_names = np.array(glob.glob(\"*.txt\"))\n\n\treg_month = str(t_start_date.year) + \"_\" + str(t_start_date.month).zfill(2)\n\n\tname_file = np.array([x for x in file_names if bool(re.search(reg_month,x))][0])\n\tindex_file = np.where(file_names == name_file)[0][0]\n\n\tif index_file-n_range < 0:\n\t\tn_range = 0\n\n\tindex_range = np.array(np.arange(index_file-n_range, index_file+1))\n\tfile_list = file_names[index_range]\n\tdata_trx = pd.concat((pd.read_csv(f, header=None) for f in file_list))\n\tdata_trx = 
data_trx.reset_index(drop=True)\n\tdata_trx = data_trx.fillna(\"\")\n\tdata_trx = data_trx.iloc[:, 0:12]\n\tdata_trx.columns = col_names\n\tprint(\"...loaded\")\n\n\treturn data_trx", "def get_start_end_times(frequency, fname):\n\n if fname.endswith('.nc'):\n\n ncfile = os.path.basename(fname)\n timestamp = ncfile.strip('.nc').split('_')[-1]\n\n # IF timestamp is of the form YYYYMMDDHHMM-YYYYMMDDHHMM\n if len(timestamp) == 25:\n start_time = datetime.date(int(timestamp[:4]), int(timestamp[4:6]), int(timestamp[7:8]))\n end_time = datetime.date(int(timestamp[-12:-8]), int(timestamp[-8:-6]), int(timestamp[-6:-4]))\n\n # IF timestamp is of the form YYYYMMDDHH-YYYYMMDDHH\n if len(timestamp) == 21:\n start_time = datetime.date(int(timestamp[:4]), int(timestamp[4:6]), int(timestamp[7:8]))\n end_time = datetime.date(int(timestamp[-10:-6]), int(timestamp[-6:-4]), int(timestamp[-4:-2]))\n\n if frequency == 'mon':\n start_time = datetime.date(int(fname[-16:-12]), int(fname[-12:-10]), 01)\n end_mon = fname[-5:-3]\n if end_mon == '02':\n end_day = 28\n elif end_mon in ['04', '06', '09', '11']:\n end_day = 30\n else:\n end_day = 31\n end_time = datetime.date(int(fname[-9:-5]), int(fname[-5:-3]), end_day)\n\n if frequency == 'day':\n start_time = datetime.date(int(fname[-20:-16]), int(fname[-16:-14]), int(fname[-14:-12]))\n end_time = datetime.date(int(fname[-11:-7]), int(fname[-7:-5]), int(fname[-5:-3]))\n else:\n start_time = datetime.date(1900, 1, 1)\n end_time = datetime.date(1999, 12, 31)\n\n return start_time, end_time", "def GetTimezones():\n return GetDataFromCsvFile('timezones.csv')", "def extract_cell_data(sheet, rows, cols):\n extract = []\n for row in rows:\n text = []\n for col in cols:\n cell_name = col + str(row)\n cell_value = str(sheet[cell_name].value).strip()\n cell_value = sheet[cell_name].value\n \n if isinstance(cell_value, datetime.datetime):\n cell_value = cell_value.strftime(\"%Y-%m-%d\")\n elif cell_value is None:\n continue\n else:\n cell_value = str(cell_value).strip()\n \n text.append(cell_value)\n text = \" \".join(text)\n if text:\n extract.append(text)\n else:\n continue\n if extract:\n return \" \".join(extract)\n else:\n return \"\"", "def read_excel_file(self):\n self.df = pd.read_excel(str(self.file_path))\n self.data_mat=np.array(self.df).astype(float).transpose()", "def get_dates(data, case_type):\n return np.asarray(data[case_type].columns[4:])", "def get_texts_from_Excel(file_name_excel, corpus_dir):\n #Creates an object of type Book from xlrd.book object\n try:\n wb = xlrd.open_workbook(filename=file_name_excel, encoding_override=\"utf-8\")\n except xlrd.XLRDError:\n print \"The file at the location {} is not a valid excel format\".format(file_name_excel)\n sys.exit()\n sheet = wb.sheet_by_index(0)\n texts = []\n text_location_dict = {}\n try:\n for row in range(1,sheet.nrows):\n row_dict = {}\n for col in range(sheet.ncols):\n if sheet.cell(row,col).ctype == 3: # 1 is type text, 3 xldate\n date_tuple = xlrd.xldate_as_tuple(sheet.cell_value(row,col), wb.datemode)\n date_py = datetime.datetime(*date_tuple)\n row_dict.update({sheet.cell_value(0,col): date_py}) # a datetime.datetime obj is stored\n else:\n row_dict.update({sheet.cell_value(0,col):sheet.cell_value(row,col)})\n unique_name = str(row_dict[TXT_ID])\n t = TxtItemLetterExcel(unique_name, **row_dict)\n \n if t.unique_name not in text_location_dict:\n t.add_page(getattr(t, PAGE_COL), getattr(t, TIMESTAMP_COL), getattr(t, TRANSCRIPTION_COL)) #note: has to be tested if attributes are correctly imported!\n 
texts.append(t)\n #dictionary to map text ids with object location - for quick access of individual items\n text_location_dict[t.unique_name] = len(texts)-1\n else:\n # l.Translation - 'Translation' is the name that was given to the column in the Excel file - if the name changes the attribute will change too\n texts[text_location_dict[t.unique_name]].add_page(getattr(t, PAGE_COL), getattr(t, TIMESTAMP_COL), getattr(t, TRANSCRIPTION_COL))\n except KeyError:\n print \"KeyError: possible cause - column names in settings file are not found in the excel source file\"\n sys.exit()\n #add a txt file folder to each object\n file_path = corpus_dir + os.sep + \"txt\"\n for txt_item in texts:\n file_name = txt_item.unique_name + \".txt\"\n txt_item.add_txt_file(file_path, file_name)\n return texts, text_location_dict", "def convert_tallies_and_times(contents):\n date_contents = contents.split('\\n')\n\n # Place the dates into a dictionary as keys and tallies as values\n dates_tallies = {}\n for string in date_contents:\n dates, tallies = string.split(':')\n dates_tallies[dates] = tallies\n return dates_tallies", "def get_time(\n filepath,\n year_ref=2000,\n ):\n with xr.open_dataset(filepath) as fdata:\n # load time\n if 'Time' in fdata.dims:\n if 'xtime' in fdata.data_vars:\n xtime = fdata['xtime'].astype(str)\n elif 'xtime_startMonthly' in fdata.data_vars:\n xtime = fdata['xtime_startMonthly'].astype(str)\n else:\n print('Time variable not found. Using indices instead...')\n return np.arange(fdata.dims['Time'])\n else:\n return None\n time_str = [x.strip() for x in xtime.values]\n if int(time_str[0][:4]) < 1678:\n time_str = ['{:04d}'.format(int(s[:4])+year_ref)+s[4:] for s in time_str]\n time = pd.to_datetime(time_str, format='%Y-%m-%d_%H:%M:%S')\n return time", "def _read_antti_datetime(dt_file):\n # NOTE: genfromtxt() doesn't work with gzipped files as it should, so we\n # unzip the file ourself, and use io.BytesIO to fake out genfromtext()\n if dt_file.split('.')[-1] == 'gz':\n ff = gzip.open(dt_file, 'r')\n else:\n ff = open(dt_file, 'r')\n\n sIO = io.BytesIO(ff.read().encode())\n ff.close()\n\n ymdHMS = np.genfromtxt(sIO, comments=\"%\")\n DT = np.array([dt.datetime(*elem) for elem in ymdHMS.astype('int')])\n sIO.close()\n\n return DT", "def extract_datetime(filename) -> datetime:\n date_part = filename[-26:-7]\n return datetime.strptime(date_part, '%Y-%m-%d_%H-%M-%S')", "def get_start_end_dates(row):\n\n if row['wo_id'] == 'UTLY' or row['wo_id'] == 'TSW':\n return row['job_start_dt'], row['job_end_dt']\n\n else:\n\n if row['job_completed_cbox'] == 1:\n return row['job_end_dt'], row['job_end_dt']\n\n else:\n return row['start'], row['end']", "def test_getExistDates(self):\n cases = [\n (self.test_eac + \"NE01201.xml\",\"1858-01-01T00:00:00Z\",\"1935-08-21T00:00:00Z\"),\n (self.test_eac + \"NE00300.xml\",\"1960-01-01T00:00:00Z\",\"1977-12-31T00:00:00Z\"),\n (self.test_eac + \"NE01500.xml\",\"1981-01-01T00:00:00Z\",\"1981-12-31T00:00:00Z\")\n ]\n for case in cases:\n source, expected_from_date, expected_to_date = case\n doc = EacCpf.EacCpf(source, 'http://www.example.com')\n self.assertNotEqual(doc, None)\n fromDate, toDate = doc.getExistDates()\n self.assertEqual(fromDate, expected_from_date)\n self.assertEqual(toDate, expected_to_date)", "def file_times(self, cycle_start, fc_start, fc_hours):\n\n logging.info('period_hours = %d' % self.period_hours)\n if self.period_hours not in [1, 3]:\n raise GribError('period_hours = %d must be 1 or 3' % self.period_hours) \n\n \n 
g=self.grib_forecast_hours_periods\n fc_seq = [] \n for i in range(0, len(g)):\n\t fc_seq += list(range(max(int(fc_start), 0 if i is 0 else g[i-1]['hours'] + g[i]['period']), \n\t g[i]['hours'] + g[i]['period'], g[i]['period']))\n # get all time points up to fc_hours plus one (we must cover entire timespan)\n fc_list = [x for x in fc_seq if x < fc_hours]\n fc_list.append(fc_seq[len(fc_list)])\n\n colmet_files_utc = [cycle_start + timedelta(hours = x) for x in range(int(fc_start), fc_list[-1] +1, self.period_hours)]\n \n return fc_list, colmet_files_utc", "def get_dates(txt):\n txt = re.sub(r'[^\\w\\s]', '', txt)\n txt_token = txt.split()\n return get_dates_from_token_list(txt_token)", "def convert_excelDateTime(self, value, return_type):\n\n try:\n converted_value = xldate.xldate_as_datetime(value,\n self.wb.datemode)\n if return_type == \"date\":\n converted_value = converted_value.date().isoformat()\n if return_type == \"time\":\n converted_value = converted_value.time().isoformat()\n except:\n converted_value = \"\"\n\n return converted_value", "def find_date(self, raw_file):\r\n # Reset variables\r\n self.time_range = []\r\n\r\n # Add previous file's mod timestamp, wd and current file's timestamp, wd\r\n # to time range\r\n if not self.is_carved_gzip and self.use_file_mod_dates:\r\n c_time_1 = str(self.time_range_src_mod[2])[:10].replace(\"-\", \".\")\r\n c_time_2 = str(self.time_range_src_mod[3])[:10].replace(\"-\", \".\")\r\n\r\n self.time_range.append([self.time_range_src_mod[0], c_time_1])\r\n self.time_range.append([self.time_range_src_mod[1], c_time_2])\r\n\r\n # Regex's for logs with dates in name\r\n regex_1 = (\"private/var/log/asl/[\\x30-\\x39]{4}[.][\\x30-\\x39]{2}\" +\r\n \"[.][\\x30-\\x39]{2}[.][\\x30-\\x7a]{2,8}[.]asl\")\r\n regex_2 = (\"mobile/Library/Logs/CrashReporter/DiagnosticLogs/security[.]log\" +\r\n \"[.][\\x30-\\x39]{8}T[\\x30-\\x39]{6}Z\")\r\n regex_3 = (\"private/var/log/asl/Logs/aslmanager[.][\\x30-\\x39]{8}T[\\x30-\\x39]\" +\r\n \"{6}[-][\\x30-\\x39]{2}\")\r\n regex_4 = (\"private/var/log/DiagnosticMessages/[\\x30-\\x39]{4}[.][\\x30-\\x39]{2}\" +\r\n \"[.][\\x30-\\x39]{2}[.]asl\")\r\n regex_5 = (\"private/var/log/com[.]apple[.]clouddocs[.]asl/[\\x30-\\x39]{4}[.]\" +\r\n \"[\\x30-\\x39]{2}[.][\\x30-\\x39]{2}[.]asl\")\r\n regex_6 = (\"private/var/log/powermanagement/[\\x30-\\x39]{4}[.][\\x30-\\x39]{2}[.]\" +\r\n \"[\\x30-\\x39]{2}[.]asl\")\r\n regex_7 = (\"private/var/log/asl/AUX[.][\\x30-\\x39]{4}[.][\\x30-\\x39]{2}[.]\" +\r\n \"[\\x30-\\x39]{2}/[0-9]{9}\")\r\n regex_8 = \"private/var/audit/[\\x30-\\x39]{14}[.]not_terminated\"\r\n\r\n # Regex that matches only events with created flag\r\n flag_regex = (\"[\\x00-\\xFF]{9}[\\x01|\\x11|\\x21|\\x31|\\x41|\\x51|\\x61|\\x05|\\x15|\" +\r\n \"\\x25|\\x35|\\x45|\\x55|\\x65]\")\r\n\r\n # Concatenating date, flag matching regexes\r\n # Also grabs working descriptor for record\r\n m_regex = \"(\" + regex_1 + \"|\" + regex_2 + \"|\" + regex_3 + \"|\" + regex_4 + \"|\" + regex_5\r\n m_regex = m_regex + \"|\" + regex_6 + \"|\" + regex_7 + \"|\" + regex_8 + \")\" + flag_regex\r\n\r\n # Start searching within fsevent file for events that match dates regex\r\n # As the length of each log location is different, create if statements for each\r\n # so that the date can be pulled from the correct location within the fullpath\r\n\r\n #decode to latin as it preveserves all lengths (1 byte == 1 character)\r\n raw_file_ascii = raw_file.decode(\"latin_1\")\r\n for match in re.finditer(m_regex, raw_file_ascii):\r\n if 
raw_file_ascii[match.regs[0][0]:match.regs[0][0] + 35] == \"private/var/log/asl/Logs/aslmanager\":\r\n # Clear timestamp temp variable\r\n t_temp = ''\r\n # t_start uses the start offset of the match\r\n t_start = match.regs[0][0] + 36\r\n # The date is 8 chars long in the format of yyyymmdd\r\n t_end = t_start + 8\r\n # Strip the date from the fsevent file\r\n t_temp = raw_file_ascii[t_start:t_end]\r\n # Format the date\r\n t_temp = t_temp[:4] + \".\" + t_temp[4:6] + \".\" + t_temp[6:8]\r\n wd_temp = struct.unpack(\"<Q\", raw_file[match.regs[0][1] - 9:match.regs[0][1] - 1])[0]\r\n elif raw_file_ascii[match.regs[0][0]:match.regs[0][0] + 23] == \"private/var/log/asl/AUX\":\r\n # Clear timestamp temp variable\r\n t_temp = ''\r\n # t_start uses the start offset of the match\r\n t_start = match.regs[0][0] + 24\r\n # The date is 10 chars long in the format of yyyy.mm.dd\r\n t_end = t_start + 10\r\n # Strip the date from the fsevent file\r\n t_temp = raw_file_ascii[t_start:t_end]\r\n wd_temp = struct.unpack(\"<Q\", raw_file[match.regs[0][1] - 9:match.regs[0][1] - 1])[0]\r\n elif raw_file_ascii[match.regs[0][0]:match.regs[0][0] + 19] == \"private/var/log/asl\":\r\n # Clear timestamp temp variable\r\n t_temp = ''\r\n # t_start uses the start offset of the match\r\n t_start = match.regs[0][0] + 20\r\n # The date is 10 chars long in the format of yyyy.mm.dd\r\n t_end = t_start + 10\r\n # Strip the date from the fsevent file\r\n t_temp = raw_file_ascii[t_start:t_end]\r\n wd_temp = struct.unpack(\"<Q\", raw_file[match.regs[0][1] - 9:match.regs[0][1] - 1])[0]\r\n elif raw_file_ascii[match.regs[0][0]:match.regs[0][0] + 4] == \"mobi\":\r\n # Clear timestamp temp variable\r\n t_temp = ''\r\n # t_start uses the start offset of the match\r\n t_start = match.regs[0][0] + 62\r\n # The date is 8 chars long in the format of yyyymmdd\r\n t_end = t_start + 8\r\n # Strip the date from the fsevent file\r\n t_temp = raw_file_ascii[t_start:t_end]\r\n # Format the date\r\n t_temp = t_temp[:4] + \".\" + t_temp[4:6] + \".\" + t_temp[6:8]\r\n wd_temp = struct.unpack(\"<Q\", raw_file[match.regs[0][1] - 9:match.regs[0][1] - 1])[0]\r\n elif raw_file_ascii[match.regs[0][0]:match.regs[0][0] + 34] == \"private/var/log/DiagnosticMessages\":\r\n # Clear timestamp temp variable\r\n t_temp = ''\r\n # t_start uses the start offset of the match\r\n t_start = match.regs[0][0] + 35\r\n # The date is 10 chars long in the format of yyyy.mm.dd\r\n t_end = t_start + 10\r\n # Strip the date from the fsevent file\r\n t_temp = raw_file_ascii[t_start:t_end]\r\n wd_temp = struct.unpack(\"<Q\", raw_file[match.regs[0][1] - 9:match.regs[0][1] - 1])[0]\r\n elif raw_file_ascii[match.regs[0][0]:match.regs[0][0] + 39] == \"private/var/log/com.apple.clouddocs.asl\":\r\n # Clear timestamp temp variable\r\n t_temp = ''\r\n # t_start uses the start offset of the match\r\n t_start = match.regs[0][0] + 40\r\n # The date is 10 chars long in the format of yyyy.mm.dd\r\n t_end = t_start + 10\r\n # Strip the date from the fsevent file\r\n t_temp = raw_file_ascii[t_start:t_end]\r\n wd_temp = struct.unpack(\"<Q\", raw_file[match.regs[0][1] - 9:match.regs[0][1] - 1])[0]\r\n elif raw_file_ascii[match.regs[0][0]:match.regs[0][0] + 31] == \"private/var/log/powermanagement\":\r\n # Clear timestamp temp variable\r\n t_temp = ''\r\n # t_start uses the start offset of the match\r\n t_start = match.regs[0][0] + 32\r\n # The date is 10 chars long in the format of yyyy.mm.dd\r\n t_end = t_start + 10\r\n # Strip the date from the fsevent file\r\n t_temp = 
raw_file_ascii[t_start:t_end]\r\n wd_temp = struct.unpack(\"<Q\", raw_file[match.regs[0][1] - 9:match.regs[0][1] - 1])[0]\r\n elif raw_file_ascii[match.regs[0][0]:match.regs[0][0] + 17] == \"private/var/audit\":\r\n # Clear timestamp temp variable\r\n t_temp = ''\r\n # t_start uses the start offset of the match\r\n t_start = match.regs[0][0] + 18\r\n # The date is 8 chars long in the format of yyyymmdd\r\n t_end = t_start + 8\r\n # Strip the date from the fsevent file\r\n t_temp = raw_file_ascii[t_start:t_end]\r\n # Format the date\r\n t_temp = t_temp[:4] + \".\" + t_temp[4:6] + \".\" + t_temp[6:8]\r\n wd_temp = struct.unpack(\"<Q\", raw_file[match.regs[0][1] - 9:match.regs[0][1] - 1])[0]\r\n else:\r\n t_temp = ''\r\n wd_temp = ''\r\n # Append date, wd to time range list\r\n self.time_range.append([wd_temp, t_temp])\r\n # Sort the time range list by wd\r\n self.time_range = sorted(self.time_range, key=self.get_key)\r\n\r\n # Call the time range builder to rebuild time range\r\n self.build_time_range()", "def readATPMatchesParseTime(dirname):\n\tallFiles = glob.glob(dirname + \"/atp_matches_\" + \"20??.csv\")\n\tallFiles = allFiles[:-1] ## avoid 2017 since its incomplete\n\tmatches = pd.DataFrame()\n\tcontainer = list()\n\tfor filen in allFiles:\n\t\tdf = pd.read_csv(filen,\n\t\t\t\t\t\t index_col=None,\n\t\t\t\t\t\t header=0,\n\t\t\t\t\t\t parse_dates=[5],\n\t\t\t\t\t\t encoding = \"ISO-8859-1\",\n\t\t\t\t\t\t date_parser=lambda t:parse(t)) ##errored out here\n\t\tcontainer.append(df)\n\tmatches = pd.concat(container)\n\treturn matches", "def filter_absolute_timexes():\n\n\n timexes = pd.read_excel('../TimeDatasets/i2b2 Data/i2b2_timexe_annotations.xlsx')\n\n timexes = timexes[timexes['type'].isin(['DATE', 'TIME'])]\n\n print('DATE AND TIME')\n print(timexes)\n absolute_timexes = timexes[ [is_absolute_timexe(string) for string in timexes['ann_text']] ]\n\n print('ABSOLUTE TIMEXES')\n print(absolute_timexes)\n\n absolute_timexes.to_excel('absolute_timexes.xlsx')\n relative_timexes = timexes[[(not is_absolute_timexe(string)) for string in timexes['ann_text']]]\n\n relative_timexes.to_excel('filtered_timexes.xlsx')\n\n # add the absolute characteristic as a boolean attribute of the timexe dataframe\n\n timexes['absolute'] = [is_absolute_timexe(string) for string in timexes['ann_text']]\n\n timexes.to_excel('date_and_time.xlsx')\n\n print('RELATIVE TIMEXES')\n print(relative_timexes)\n\n # Print the results\n\n print(len(timexes[(timexes.absolute == False) & (timexes.test == False)]['docname'].unique()))\n\n train_relatives = timexes[(timexes.absolute == False) & (timexes.test == False)]\n test_relatives = timexes[(timexes.absolute == False) & (timexes.test == True)]\n\n print('Train set : ' + str(len(train_relatives)) + \" relative time expressions\")\n print('Test set : ' + str(len(test_relatives)) + \" relative time expressions\")\n\n return relative_timexes", "def read_timestamps(self, tasks):\n from reframe.core.deferrable import evaluate\n\n self.begin_stamps = []\n self.end_stamps = []\n for t in tasks:\n with open(evaluate(t.check.stdout), 'r') as f:\n self.begin_stamps.append(float(f.readline().strip()))\n self.end_stamps.append(float(f.readline().strip()))\n\n self.begin_stamps.sort()\n self.end_stamps.sort()", "def date_parser(dates):\n\n #splitting the dates(containing datetime data) list and returning only the datetime\n return([item.split()[0] for item in dates])\n pass", "def read_traveltime(self):\r\n \r\n #### read Travel time from txt file\r\n \r\n \r\n #### Particle travel 
time branch 1\r\n excelfile_surface_branch1_high = r'excel\\flow_rate\\particle_surface_branch1_high.xlsx'\r\n inarray_surface_branch1_high = pd.read_excel(excelfile_surface_branch1_high).to_numpy() \r\n \r\n excelfile_surface_branch1_medium = r'excel\\flow_rate\\particle_surface_branch1_medium.xlsx'\r\n inarray_surface_branch1_medium = pd.read_excel(excelfile_surface_branch1_medium).to_numpy() \r\n \r\n excelfile_surface_branch1_low = r'excel\\flow_rate\\particle_surface_branch1_low.xlsx'\r\n inarray_surface_branch1_low = pd.read_excel(excelfile_surface_branch1_low).to_numpy()\r\n \r\n excelfile_bottom_branch1_high = r'excel\\flow_rate\\particle_bottom_branch1_high.xlsx'\r\n inarray_bottom_branch1_high = pd.read_excel(excelfile_bottom_branch1_high).to_numpy()\r\n \r\n excelfile_bottom_branch1_medium = r'excel\\flow_rate\\particle_bottom_branch1_medium.xlsx'\r\n inarray_bottom_branch1_medium = pd.read_excel(excelfile_bottom_branch1_medium).to_numpy()\r\n \r\n excelfile_bottom_branch1_low = r'excel\\flow_rate\\particle_bottom_branch1_low.xlsx'\r\n inarray_bottom_branch1_low = pd.read_excel(excelfile_bottom_branch1_low).to_numpy()\r\n \r\n \r\n #### Tracer travel time branch 1\r\n excelfile_tracer_branch1_high = r'excel\\flow_rate\\tracer_branch1_high.xlsx'\r\n inarray_tracer_branch1_high = pd.read_excel(excelfile_tracer_branch1_high).to_numpy()\r\n \r\n excelfile_tracer_branch1_medium = r'excel\\flow_rate\\tracer_branch1_medium.xlsx'\r\n inarray_tracer_branch1_medium = pd.read_excel(excelfile_tracer_branch1_medium).to_numpy()\r\n \r\n excelfile_tracer_branch1_low = r'excel\\flow_rate\\tracer_branch1_low.xlsx'\r\n inarray_tracer_branch1_low = pd.read_excel(excelfile_tracer_branch1_low).to_numpy()\r\n \r\n self.inarrays_branch1 = [inarray_surface_branch1_high, inarray_surface_branch1_medium, inarray_surface_branch1_low, \\\r\n inarray_bottom_branch1_high, inarray_bottom_branch1_medium, inarray_bottom_branch1_low, \\\r\n inarray_tracer_branch1_high, inarray_tracer_branch1_medium, inarray_tracer_branch1_low]\r\n \r\n \r\n #### Particle travel time branch 5\r\n excelfile_surface_branch5_high = r'excel\\flow_rate\\particle_surface_branch5_high.xlsx'\r\n inarray_surface_branch5_high = pd.read_excel(excelfile_surface_branch5_high).to_numpy()\r\n \r\n excelfile_surface_branch5_medium = r'excel\\flow_rate\\particle_surface_branch5_medium.xlsx'\r\n inarray_surface_branch5_medium = pd.read_excel(excelfile_surface_branch5_medium).to_numpy()\r\n \r\n excelfile_surface_branch5_low = r'excel\\flow_rate\\particle_surface_branch5_low.xlsx'\r\n inarray_surface_branch5_low = pd.read_excel(excelfile_surface_branch5_low).to_numpy()\r\n \r\n excelfile_bottom_branch5_high = r'excel\\flow_rate\\particle_bottom_branch5_high.xlsx'\r\n inarray_bottom_branch5_high = pd.read_excel(excelfile_bottom_branch5_high).to_numpy()\r\n \r\n excelfile_bottom_branch5_medium = r'excel\\flow_rate\\particle_bottom_branch5_medium.xlsx'\r\n inarray_bottom_branch5_medium = pd.read_excel(excelfile_bottom_branch5_medium).to_numpy()\r\n \r\n excelfile_bottom_branch5_low = r'excel\\flow_rate\\particle_bottom_branch5_low.xlsx'\r\n inarray_bottom_branch5_low = pd.read_excel(excelfile_bottom_branch5_low).to_numpy()\r\n \r\n \r\n #### Tracer travel time branch 5\r\n excelfile_tracer_branch5_high = r'excel\\flow_rate\\tracer_branch5_high.xlsx'\r\n inarray_tracer_branch5_high = pd.read_excel(excelfile_tracer_branch5_high).to_numpy()\r\n \r\n excelfile_tracer_branch5_medium = r'excel\\flow_rate\\tracer_branch5_medium.xlsx'\r\n 
inarray_tracer_branch5_medium = pd.read_excel(excelfile_tracer_branch5_medium).to_numpy()\r\n \r\n excelfile_tracer_branch5_low = r'excel\\flow_rate\\tracer_branch5_low.xlsx'\r\n inarray_tracer_branch5_low = pd.read_excel(excelfile_tracer_branch5_low).to_numpy()\r\n \r\n \r\n self.inarrays_branch5 = [inarray_surface_branch5_high, inarray_surface_branch5_medium, inarray_surface_branch5_low, \\\r\n inarray_bottom_branch5_high, inarray_bottom_branch5_medium, inarray_bottom_branch5_low, \\\r\n inarray_tracer_branch5_high, inarray_tracer_branch5_medium, inarray_tracer_branch5_low]", "def por_flocs():\n return pd.read_excel('por_flocs.xlsx')", "def allexposuretimes(self):\n exposures1 = exposures2 = None\n if self.__allexposures is not None:\n exposures = [_ for _ in self.__allexposures if _.n == self.n]\n exposures1 = [e.exp for e in exposures]\n if self.__xmlfolder is not None:\n exposures2 = [exposuretimeandbroadbandfilter[0] for exposuretimeandbroadbandfilter in self.__allexposuretimesandbroadbandfilters]\n if exposures1 is exposures2 is None:\n raise ValueError(\"Can't get the exposure times unless you provide the xml folder or exposures csv\")\n if None is not exposures1 != exposures2 is not None:\n raise ValueError(f\"Found inconsistent exposure times from exposures.csv and from xml file:\\n{exposures1}\\n{exposures2}\")\n return exposures1 if exposures1 is not None else exposures2", "def readAll():\n result = _rc.readAttribute(OPTYPE.TIME_OF_FLIGHT)\n mm = bytesToIntArray(result, 2, signed=False)\n return [mm[0] / 10, mm[1] / 10]", "def get_release_dates():\n path = os.path.join(c.cfg['default']['data'], 'COT', 'ReleaseDates.txt')\n\n # Read Data from csv file\n dates = pd.read_csv(path, index_col=0, parse_dates=True)\n\n \n dates.index.names = [None]\n # Format Release Dates to Timestamp (as this is the original format and will be used later)!\n dates['ReleaseDate'] = pd.to_datetime(dates['ReleaseDate'], format='%Y-%m-%d')\n\n return dates" ]
[ "0.6484449", "0.5983393", "0.59567344", "0.5858225", "0.5803949", "0.5792485", "0.5662788", "0.56464463", "0.5638216", "0.5618037", "0.5579476", "0.5578673", "0.55383044", "0.5533895", "0.5525505", "0.55171365", "0.55086786", "0.54955643", "0.54917186", "0.5489937", "0.54851663", "0.54831266", "0.5475454", "0.5462265", "0.5452194", "0.54521203", "0.5431147", "0.5430281", "0.5428513", "0.5395187", "0.53895915", "0.5364466", "0.5346592", "0.5339124", "0.5321377", "0.5320832", "0.5319099", "0.53000736", "0.5287632", "0.5245196", "0.5242507", "0.5241817", "0.52352166", "0.5230151", "0.522825", "0.5209041", "0.5208977", "0.5203262", "0.51915604", "0.5190601", "0.5188097", "0.5187615", "0.5183233", "0.518006", "0.5173016", "0.51698625", "0.51682425", "0.5166955", "0.51614624", "0.515443", "0.5152535", "0.51393354", "0.51385486", "0.51330096", "0.51222926", "0.51202315", "0.51183623", "0.51044697", "0.5077578", "0.50769764", "0.50722814", "0.50712585", "0.5069329", "0.50549597", "0.50535005", "0.50518733", "0.50454235", "0.5036251", "0.5028839", "0.50268954", "0.5021726", "0.50137705", "0.50133514", "0.5012685", "0.5012043", "0.50097656", "0.50022894", "0.5001884", "0.49986714", "0.49922755", "0.4988575", "0.49866858", "0.49821427", "0.49802172", "0.4977303", "0.4975728", "0.49695903", "0.49677995", "0.49657696", "0.4963225" ]
0.63838965
1
Write the predictions in the filename according to Kaggle format.
def write_predictions(pred, filename="pred.csv"): output_file = open(filename, "wb") writer = csv.writer(output_file) datetimes = get_datetimes("test.csv") writer.writerow(["datetime", "count"]) for index, count in enumerate(pred): writer.writerow([datetimes[index], int(count)]) output_file.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_pred_kaggle_file(cls, outfname, speech):\n yp = cls.predict(speech.test_doc_vec)\n labels = speech.le.inverse_transform(yp)\n f = codecs.open(outfname, 'w')\n f.write(\"FileIndex,Category\\n\")\n for i in range(len(speech.test_fnames)):\n fname = speech.test_fnames[i]\n f.write(fname + ',' + labels[i] + '\\n')\n f.close()", "def save_predictions(self,file_path):\n # compute average of predictions\n num_examples = len(self.labels)\n\n if num_examples == 0:\n raise Exception (\"nothing to save\")\n\n def string_to_average(string):\n return np.average(np.array(string.split(\",\"),dtype=float))\n prediction_averages = np.around(map(string_to_average,self.predictions),decimals=3)\n\n # sort by prediction averages\n order = np.flipud(prediction_averages.argsort())\n prediction_averages = prediction_averages[order]\n self.pl_pairs = self.pl_pairs[order]\n self.predictions = self.predictions[order]\n self.labels = self.labels[order]\n # write all of the predictions to the file\n f = open(file_path + \"_predictions.txt\", 'w')\n\n for i in range(num_examples):\n f.write((str(prediction_averages[i]) + \" \"*10)[:10]\n + (str(self.labels[i]) + \" \"*50)[:10]\n + str(self.pl_pairs[i] + \" \"*50)[:50]\n + str(self.predictions[i] + \" \"*50)[:50]\n + \"\\n\")\n\n f.close()\n # write and save some metadata\n\n f = open(file_path + \"_scores.txt\", 'w')\n f.write(\"top 100 score: \")\n f.write(str(self.top_100_score(self.predictions,self.labels)))\n f.write(\"\\nAUC: \")\n f.write(str(self.auc(prediction_averages,self.labels)))\n f.write(\"\\nconfusion matrix: \")\n f.write(str(self.confusion_matrix(prediction_averages,self.labels)))\n f.close()\n\n # write a file in Kaggle MAP{K} submision format\n # the form is:\n # Protein1, Ligand3 Ligand4 Ligand2\n # Protein2, Ligand5 Ligand9 Ligand7\n\n raw_database_array = np.genfromtxt(FLAGS.test_set_file_path, delimiter=',', dtype=str)\n receptor_set = raw_database_array[:,2]\n receptor_set = list(set(map(lambda x:x.split('.')[0].split('/')[-1],receptor_set)))\n submission = {}\n for i in range(num_examples):\n # get the name of the ligand and protein\n ligand,receptor = self.pl_pairs[i].split(',')\n ligand = ligand.split('/')[-1].split('.')[0]\n receptor = receptor.split('/')[-1].split('.')[0]\n # add all protein-ligand pairs to submission\n if not receptor in submission.keys():\n submission[receptor] = {}\n submission[receptor]['ligands'] = [ligand]\n submission[receptor]['score'] = [prediction_averages[i]]\n else:\n submission[receptor]['ligands'].append(ligand)\n submission[receptor]['score'].append(prediction_averages[i])\n \n # write and save submisison to file\n # if failed to predict any liagnd for a receptor\n # use placeholder 'L' as predict result\n # e.g. 
P1234,L\n with open(file_path+'_submission.csv','w') as f:\n f.write('Id,Expected\\n')\n for key in receptor_set:\n if key in submission.keys():\n ligands = np.array(submission[key]['ligands'])\n scores = np.array(submission[key]['score'])\n ligands = ligands[np.flipud(scores.argsort())]\n f.write(key+','+' '.join(ligands)+'\\n')\n else:\n f.write(key+','+'L'+'\\n')", "def save_prediction(predictions, image_file, path):\n\t\n\tsave_file = convert_file_extension_to_txt(image_file)\n\t\n\twith open(os.path.join(path, save_file), 'w') as f:\n\t\tfor prediction in predictions:\n\t\t\tf.write(str(prediction) + \"\\n\")", "def write_predictions(y_pred, filename, yname=None) :\n out = open(filename, 'wb')\n f = csv.writer(out)\n if yname :\n f.writerow([yname])\n f.writerows(list(zip(y_pred)))\n out.close()", "def write_predictions(y_pred, filename, yname=None) :\n out = open(filename, 'wb')\n f = csv.writer(out)\n if yname :\n f.writerow([yname])\n f.writerows(list(zip(y_pred)))\n out.close()", "def write_predictions(y_pred, filename, yname=None) :\n out = open(filename, 'wb')\n f = csv.writer(out)\n if yname :\n f.writerow([yname])\n f.writerows(zip(y_pred))\n out.close()", "def write(self, predictions, filename):\n assert predictions.ndim == 2 or predictions.ndim == 3\n\n driver = self.dataset.GetDriver()\n\n if predictions.ndim == 2:\n dst_ds = driver.CreateCopy(filename, self.dataset)\n # Overwrite the raster band with the predicted labels\n band = dst_ds.GetRasterBand(1)\n band.WriteArray(predictions)\n else:\n for t in range(predictions.shape[0]):\n dst_filename = filename.replace('.tif', '_{}.tif'.format(t))\n dst_ds = driver.CreateCopy(dst_filename, self.dataset)\n\n # Overwrite the raster band with the predicted labels\n band = dst_ds.GetRasterBand(1)\n band.WriteArray(predictions[t])", "def save_prediction(self, meta, y_pred, y, filename):\n df = pd.DataFrame(meta)\n df['y_pred'] = y_pred\n df['y'] = y\n print(df)\n df.loc[:, 'id'] = df.index\n self.df_to_csv(df, filename, store_header=False)", "def write_predictions_to_file(predictor, testDataFname, enc, outputFname, features=None):\n\n testData, _, testDataIds, _ = make_data(testDataFname, features=features, enc=enc)\n\n dt = datetime.now()\n predictions = predictor.predict(testData)\n print 'predicting took', datetime.now() - dt\n\n featureSelectionOutput = np.transpose(np.vstack((testDataIds, predictions.round().astype(int))))\n\n with open(outputFname, 'wb') as outputFile:\n writer = csv.writer(outputFile)\n writer.writerow(['id', 'loss'])\n writer.writerows(featureSelectionOutput)", "def output_predictions(predictions_file, relations, predictions, test_set_keys, test_labels):\n with codecs.open(predictions_file, 'w', 'utf-8') as f_out:\n for i, (w1, w2) in enumerate(test_set_keys):\n f_out.write('\\t'.join([w1, w2, relations[test_labels[i]], relations[predictions[i]]]) + '\\n')", "def save_results(predictions, filename):\n with open(filename, 'w') as f:\n f.write(\"id,ACTION\\n\")\n for i, pred in enumerate(predictions):\n f.write(\"%d,%f\\n\" % (i + 1, pred))", "def write_predictions(prediction_dic, result_path):\n with open(result_path, 'wb') as outfile:\n outfile.write(bytes('Patient_ID,HPV/p16_status\\n', 'UTF-8'))\n for patient_id, pred in prediction_dic.items():\n outfile.write(bytes(str(patient_id) + ',' + str(pred) + '\\n', 'UTF-8'))", "def save_predictions(model, dataset, output_dir):\n preds = model.predict(dataset, verbose=1)\n preds = scipy.special.softmax(preds, 1) # Apply softmax\n with 
tf.io.gfile.GFile(os.path.join(output_dir, 'test_preds.pkl'), 'wb') as f:\n pickle.dump(preds, f)", "def write(self, predictions, filename):\n driver = self.dataset.GetDriver()\n dst_ds = driver.CreateCopy(filename, self.dataset)\n\n prediction_array = np.zeros_like(self.segmentation)\n for prediction, y, x in predictions:\n prediction_array[y:y + self.size, x:x + self.size] = prediction\n\n # Overwrite the raster band with the predicted labels\n band = dst_ds.GetRasterBand(1)\n band.WriteArray(prediction_array)", "def save_predictions(battle_name: str, data: str, predictions: List):\n path = './data_reader/data/predictions/' + data + '.' + battle_name\n with open(path, 'w') as outfile:\n for prediction in predictions:\n outfile.write(str(prediction) + '\\n')", "def save_predictions(gtfilename, loss_type, probs, preds, outfile):\n\n # 1. get file ids\n liste_fileids = []\n targets = []\n passFirstLine=True\n with open(gtfilename, 'r') as fh:\n for line in fh:\n if passFirstLine:\n passFirstLine = False\n continue\n tmp = line.rstrip().split(',')\n liste_fileids.append(tmp[0])\n targets.append(tmp[1])\n\n print 'liste_fileids', len(liste_fileids)\n # 2. save preds\n import csv\n with open(outfile, 'w') as csvfile:\n # fieldnames = ['itemid', 'hasbird', 'pred', 'gt']\n fieldnames = ['itemid', 'hasbird']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n if loss_type == 'categorical_crossentropy':\n for i, id in enumerate(liste_fileids):\n # writer.writerow({'itemid': id, 'hasbird': probs[i, 1], 'pred': preds[i], 'gt': targets[i]})\n writer.writerow({'itemid': id, 'hasbird': probs[i, 1]})\n elif loss_type == 'binary_hinge' or loss_type == 'binary_crossentropy' or loss_type == 'weighted_binary_crossentropy':\n for i, id in enumerate(liste_fileids):\n # writer.writerow({'itemid': id, 'hasbird': probs[i][0], 'pred': preds[i], 'gt': targets[i]})\n writer.writerow({'itemid': id, 'hasbird': probs[i][0]})\n\n print \"INFO: predictions (positive class probas) saved to file:\", outfile", "def write_classifier_output(location, folds, labels, predictions, class_probs, names=None):\n with open(os.path.join(location, '-'.join([\"classifier\", \"fold\", \"predictions\"]) + '.txt'), 'w') as out_file:\n for fold in range(folds):\n out_file.write(\"fold \" + str(fold+1) + ':\\n')\n out_file.write(\"{:50} {:<12} {:<12} {:<9} {:<9}\\n\".format(\"recording\", \"prediction\", \"label\", \"class 0\",\n \"class 1\"))\n fold_labels, fold_predictions, fold_class_probs = labels[fold], predictions[fold], class_probs[fold]\n\n if names is not None and len(names) != 0:\n fold_names = np.hstack(names[fold])\n else:\n fold_names = len(fold_predictions) * ['']\n\n for pred_lab_tuple in zip(fold_names, fold_predictions, fold_labels, fold_class_probs[:, 0],\n fold_class_probs[:, 1]):\n (name, pred, label, prob1, prob2) = pred_lab_tuple\n out_file.write(\"{:50} {:<12} {:<12} {:<9.2f} {:<9.2f}\\n\".format(name, pred, label, prob1, prob2))\n out_file.write('\\n')", "def generate_train_txt(name, path):\n with open(path + '/test.txt', 'a') as file:\n file.write('/content/YOLO_metric/data/obj/' + name + '\\n')", "def save_predictions(predictions, img_paths, output_dir='predictions'):\n\n print(f'\\nSaving prediction to {output_dir} ...')\n\n if not osp.exists(output_dir):\n os.mkdir(output_dir)\n\n for pred, img_path in tqdm(zip(predictions, img_paths), total=len(predictions)):\n img_name = osp.basename(img_path)\n pred = pred.astype('uint8')\n Image.fromarray(pred * 255).save(osp.join(output_dir, 
img_name))", "def prediction_filename(run_setup):\n fh5 = run_setup[\"ppopts\"][\"fpredout\"]\n return fh5+\".h5\"", "def write_model(clf, filename):\n joblib.dump(clf, filename)", "def test_file(self, file_name, version, classifier_type):\n labels = []\n with open(file_name) as f:\n for line in f.readlines():\n print(line,self.predict(line))\n labels.append(self.predict(line))\n \n filename = 'test_results-' + classifier_type + '-' + version + '.txt'\n \n with open(filename, 'w') as f:\n for label in labels:\n f.write(str(label)+\"\\n\")\n \n print (\"Results from \",file_name,\" printed to:\",filename)", "def save_predictions(prediction_maps, output_file, dataset_names):\n assert len(prediction_maps) == len(dataset_names), 'Each prediction map has to have a corresponding dataset name'\n logger.info(f'Saving predictions to: {output_file}...')\n\n with h5py.File(output_file, \"w\") as output_h5:\n for prediction_map, dataset_name in zip(prediction_maps, dataset_names):\n logger.info(f\"Creating dataset '{dataset_name}'...\")\n output_h5.create_dataset(dataset_name, data=prediction_map, compression=\"gzip\")", "def save_prediction(self):\n if DataLoader.data is None:\n messagebox.showerror(\"Information\", \"Data file is empty, please load the data first.\")\n return\n if Trainer.y_pred is None:\n messagebox.showerror(\"Information\", \"Preciction has not been made, please train a new model and predict or \"\n \"load a model and predict.\")\n return\n\n path = filedialog.asksaveasfile(mode='w', defaultextension=\".csv\", filetypes=[(\"csv files\", '*.csv'),\n (\"xlsx files\", '*.xlsx'),\n (\"dat files\", '*.dat')])\n\n copy_data = DataLoader.data.copy()\n copy_data['prediction'] = Trainer.y_pred\n copy_data.to_csv(path, index=False)\n\n # Clears memory\n copy_data.drop(copy_data.index, inplace=True)\n del copy_data", "def save(self, output_folder: str, show_confidence: bool = True) -> None:\n if output_folder:\n os.makedirs(output_folder, exist_ok=True)\n\n for i, prediction in enumerate(self._images_prediction_lst):\n image_output_path = os.path.join(output_folder, f\"pred_{i}.jpg\")\n prediction.save(output_path=image_output_path, show_confidence=show_confidence)", "def save(self, directory_name: str):\n os.makedirs(directory_name, exist_ok=True)\n self._plot_accuracy()\n self._plot_losses()\n plt.xlabel('Epoch')\n plt.legend()\n plt.savefig(directory_name + '/result.png')\n plt.close()\n\n self._plot_sample(self.test_data)\n plt.savefig(directory_name + '/sample.png')\n plt.close()\n\n content = self.FORMAT.format(self.epochs,\n self.training_accuracies[-1] * 100,\n self.test_accuracies[-1] * 100,\n self.training_losses[-1],\n self.test_losses[-1],\n self.model.layers_size,\n self.model.activation,\n len(self.training_data[0]),\n len(self.test_data[0]),\n self.model.learning_rate,\n self.model.batch_size,\n len(self.validation_data[0]),\n self.training_method)\n\n with open(directory_name + '/result.md', 'w') as file:\n file.write(content)\n\n save(self.model, '{}/model.pkl'.format(directory_name))", "def write_out_prediction(predictions_file, src_seqs,\n trg_seqs, pred_string, src_feat_bundles,\n trg_feat_bundles, val_id):\n\n output_lines = []\n if trg_seqs[val_id] != pred_string:\n output_lines.append('*ERROR*')\n output_lines.append('SRC: {}'.format(src_seqs[val_id]))\n if src_feat_bundles[val_id]:\n output_lines.append('SFT: {}'.format(src_feat_bundles[val_id]))\n if trg_feat_bundles[val_id]:\n output_lines.append('TFT: {}'.format(trg_feat_bundles[val_id]))\n 
output_lines.append('TRG: {}'.format(trg_seqs[val_id]))\n output_lines.append('PRD: {}\\n'.format(pred_string))\n predictions_file.write('{}\\n'.format('\\n'.join(output_lines)))", "def test():\n # load dataset and model\n X, observed_y = load_data('../data/dev.txt')\n\n model = pickle.load(open('test.model', 'rb'))\n model.traverse()\n\n # predict labels for dataset\n preds = model.predict(X)\n\n # print(preds)\n # output model predictions\n np.savetxt('test.predictions', preds, fmt='%s')", "def log_inference(tester, name, description):\r\n\tfor dataset, output in tester.preds.items():\r\n\t\tresults = pandas.DataFrame.from_dict(output)\r\n\t\tpath = os.path.join(\r\n\t\t\tEXPERIMENT_PATH, tester.config[\"name\"] + '-' + dataset)\r\n\t\twith open(path + \".csv\", \"w\") as f:\r\n\t\t\tresults.to_csv(f, sep=\"\\t\", encoding='utf-8', \r\n\t\t\t\tfloat_format='%.3f', index=False)\r\n\r\n\t\twith open(path + \"-predictions.csv\", \"w\") as f:\r\n\t\t\tresults[[\"tag\", \"y_hat\"]].to_csv(\r\n\t\t\t\tf, index=False, float_format='%.3f', header=False)", "def write_predictions(self, predictions, file_path=None, is_dict=True, pycm_obj=None):\n\n try:\n super(SequenceClassification, self).write_predictions(\n predictions, file_path=file_path, is_dict=is_dict\n )\n except AttributeError:\n # TODO: Need to Fix\n model_base = ModelBase()\n model_base._log_dir = self._log_dir\n model_base._train_counter = self._train_counter\n model_base.training = self.training\n model_base.write_predictions(predictions, file_path=file_path, is_dict=is_dict)\n\n data_type = \"train\" if self.training else \"valid\"\n\n if pycm_obj is not None:\n stats_file_path = f\"predictions-{data_type}-{self._train_counter.get_display()}-stats\"\n pycm_obj.save_csv(str(Path(self._log_dir) / \"predictions\" / stats_file_path))\n\n confusion_matrix_file_path = (\n f\"predictions-{data_type}-{self._train_counter.get_display()}-confusion_matrix\"\n )\n cls_utils.write_confusion_matrix_to_csv(\n str(Path(self._log_dir) / \"predictions\" / confusion_matrix_file_path), pycm_obj\n )", "def save_predictions(self, preds_all, save_dir, scale_pred=False):\n for idx, fname in enumerate(self.test_files):\n fh = open(fname, 'rb')\n img = pil.open(fh)\n orig_h, orig_w = self.gt_depths[idx].shape\n pred_resize = cv2.resize(preds_all[idx], (orig_w, orig_h), interpolation=cv2.INTER_LINEAR)\n if scale_pred:\n scaled_disp, _ = self.scale_depth_disp(pred_resize)\n disp_img = self.generate_disparity_img(scaled_disp)\n else:\n disp_img = self.generate_disparity_img(1./pred_resize)\n\n imgname = \"{0:04d}\".format(idx)\n name_img = os.path.join(save_dir, imgname+\".jpeg\")\n img.save(name_img)\n name_disp = os.path.join(save_dir, imgname+\"_disp.jpeg\")\n disp_img.save(name_disp)", "def create_identical_truth_and_prediction_file():\r\n # Create an artificial email classification dictionary \r\n class_dict = create_classification()\r\n # Compile the filepaths\r\n truth_filepath = os.path.join(CORPUS_DIR, TRUTH_FILENAME)\r\n pred_filepath = os.path.join(CORPUS_DIR, PREDICTION_FILANAME)\r\n # Save the same dictionary as both the !truth.txt and !prediction.txt\r\n save_classification_to_file(class_dict, truth_filepath)\r\n save_classification_to_file(class_dict, pred_filepath)", "def save_feature(ndarray, feature_name, out_path, x, y, new_labels, filename=None):\n # this is kind-of standard\n filename = filename or FeatureExtractor.get_file_name(x, feature_name)\n np.save(out_path / filename, ndarray)\n new_labels.append([filename, y])\n print('info: {} 
transformed and saved!'.format(filename))\n return filename", "def store_classes_and_predictions(output_file_path, classes, predictions):\n with open(output_file_path, mode='a', newline='') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',')\n csvwriter.writerow(['true', 'predicted'])\n for i in range(len(classes)):\n csvwriter.writerow([classes.iloc[i], predictions.iloc[i]])", "def output_predictions(predictions, output_file=None, output_format=\"plaintext\", info=None):\n\n content = \"\"\n\n if (output_format == \"plaintext\") or (output_format == \"txt\"):\n lines = []\n for k in predictions:\n lines.append(\"- %s: %s\" % (k, predictions[k]))\n content = \"\\n\".join(lines)\n\n elif output_format == \"csv\":\n output = io.StringIO()\n writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC)\n writer.writerow([\"label\", \"probability\"])\n for k in predictions:\n writer.writerow([k, float(predictions[k])])\n content = output.getvalue()\n\n elif output_format == \"xml\":\n root = ET.Element(\"predictions\")\n if info is not None:\n for k in info:\n root.set(k, info[k])\n root.set(\"timestamp\", str(datetime.now()))\n for k in predictions:\n p = ET.SubElement(root, \"prediction\")\n p.set(\"label\", k)\n p.text = str(float(predictions[k]))\n content = minidom.parseString(ET.tostring(root)).toprettyxml(indent = \" \")\n\n elif output_format == \"json\":\n data = dict()\n if info is not None:\n info = copy.copy(info)\n else:\n info = dict()\n info[\"timestamp\"] = str(datetime.now())\n data[\"info\"] = info\n data[\"predictions\"] = dict()\n for k in predictions:\n data[\"predictions\"][k] = float(predictions[k])\n content = json.dumps(data)\n\n else:\n raise Exception(\"Unhandled format: %s\" % output_format)\n\n if output_file is None:\n print(content)\n else:\n with open(output_file, \"w\") as of:\n of.write(content)", "def save_predictions_in_panoptic_format(model,\n checkpoint_name,\n data_split,\n score_threshold,\n global_step):\n\n dataset = model.dataset\n # Round this because protobuf encodes default values as full decimal\n score_threshold = round(score_threshold, 3)\n\n # Get available prediction folders\n predictions_root_dir = pplp.root_dir() + '/data/outputs/' + \\\n checkpoint_name + '/predictions'\n\n final_predictions_root_dir = predictions_root_dir + \\\n '/final_predictions_and_scores/' + dataset.data_split\n\n final_predictions_dir = final_predictions_root_dir + \\\n '/' + str(global_step)\n\n # 3D prediction directories\n panoptic_predictions_3d_dir = predictions_root_dir + \\\n '/panoptic_pplp_eval/' + \\\n str(score_threshold) + '/' + \\\n str(global_step) + '/data'\n\n if not os.path.exists(panoptic_predictions_3d_dir):\n os.makedirs(panoptic_predictions_3d_dir)\n\n # Do conversion\n num_samples = dataset.num_samples\n num_valid_samples = 0\n\n print('\\nGlobal step:', global_step)\n print('Converting detections from:', final_predictions_dir)\n\n print('3D Detections being saved to:', panoptic_predictions_3d_dir)\n\n for sample_idx in range(num_samples):\n # Print progress\n sys.stdout.write('\\rConverting {} / {}'.format(\n sample_idx + 1, num_samples))\n sys.stdout.flush()\n\n sample_name = dataset.sample_names[sample_idx]\n\n prediction_file = sample_name + '.txt'\n\n panoptic_predictions_3d_file_path = panoptic_predictions_3d_dir + \\\n '/' + prediction_file\n\n predictions_file_path = final_predictions_dir + \\\n '/' + prediction_file\n\n # If no predictions, skip to next file\n if not os.path.exists(predictions_file_path):\n 
np.savetxt(panoptic_predictions_3d_file_path, [])\n continue\n\n all_predictions = np.loadtxt(predictions_file_path)\n\n # # Swap l, w for predictions where w > l\n # swapped_indices = all_predictions[:, 4] > all_predictions[:, 3]\n # fixed_predictions = np.copy(all_predictions)\n # fixed_predictions[swapped_indices, 3] = all_predictions[\n # swapped_indices, 4]\n # fixed_predictions[swapped_indices, 4] = all_predictions[\n # swapped_indices, 3]\n\n all_predictions = np.array(all_predictions)\n\n # change 1D array in to 2D array even if it has only one row.\n if len(all_predictions.shape) == 1:\n all_predictions.shape = (1, -1)\n\n score_filter = all_predictions[:, 7] >= score_threshold\n all_predictions = all_predictions[score_filter]\n # If no predictions, skip to next file\n if len(all_predictions) == 0:\n np.savetxt(panoptic_predictions_3d_file_path, [])\n continue\n\n # Project to image space\n sample_name = prediction_file.split('.')[0]\n img_idx = int(sample_name)\n\n # Load image for truncation\n image = Image.open(dataset.get_rgb_image_path(sample_name))\n\n stereo_calib_p2 = calib_panoptic_utils.read_calibration(dataset.calib_dir,\n img_idx).HD_11\n\n boxes = []\n image_filter = []\n for i in range(len(all_predictions)):\n box_3d = all_predictions[i, 0:7]\n img_box = box_3d_panoptic_projector.project_to_image_space(\n box_3d, stereo_calib_p2,\n truncate=True, image_size=image.size)\n\n # Skip invalid boxes (outside image space)\n if img_box is None:\n image_filter.append(False)\n print('**ERROR img_box = ', img_box)\n continue\n\n image_filter.append(True)\n boxes.append(img_box)\n\n boxes = np.asarray(boxes)\n all_predictions = all_predictions[image_filter]\n\n # If no predictions, skip to next file\n if len(boxes) == 0:\n np.savetxt(panoptic_predictions_3d_file_path, [])\n continue\n\n num_valid_samples += 1\n\n # To keep each value in its appropriate position, an array of zeros\n # (N, 16) is allocated but only values [4:16] are used\n panoptic_predictions = np.zeros([len(boxes), 16])\n\n # Get object types\n all_pred_classes = all_predictions[:, 8].astype(np.int32)\n obj_types = [dataset.classes[class_idx]\n for class_idx in all_pred_classes]\n\n # Truncation and Occlusion are always empty (see below)\n\n # Alpha (Not computed)\n panoptic_predictions[:, 3] = -10 * np.ones((len(panoptic_predictions)),\n dtype=np.int32)\n\n # 2D predictions\n panoptic_predictions[:, 4:8] = boxes[:, 0:4]\n\n # 3D predictions\n # (l, w, h)\n panoptic_predictions[:, 8] = all_predictions[:, 5]\n panoptic_predictions[:, 9] = all_predictions[:, 4]\n panoptic_predictions[:, 10] = all_predictions[:, 3]\n # (x, y, z)\n panoptic_predictions[:, 11:14] = all_predictions[:, 0:3]\n # (ry, score)\n panoptic_predictions[:, 14:16] = all_predictions[:, 6:8]\n\n # Round detections to 3 decimal places\n panoptic_predictions = np.round(panoptic_predictions, 3)\n\n # Empty Truncation, Occlusion\n panoptic_empty_1 = -1 * np.ones((len(panoptic_predictions), 2),\n dtype=np.int32)\n\n # Stack 3D predictions text\n panoptic_text_3d = np.column_stack([obj_types,\n panoptic_empty_1,\n panoptic_predictions[:, 3:16]])\n\n # Save to text files\n np.savetxt(panoptic_predictions_3d_file_path, panoptic_text_3d,\n newline='\\r\\n', fmt='%s')\n\n print('\\nNum valid:', num_valid_samples)\n print('Num samples:', num_samples)", "def save_best_predictions(paths, metric):\n dir_res = '../results/'\n dir_predictions = '../results_best/predictions/'\n dir_dest = dir_predictions+metric+'/'\n\n if not os.path.exists(dir_predictions):\n 
os.mkdir(dir_predictions)\n\n if not os.path.exists(dir_dest):\n os.mkdir(dir_dest)\n\n for path in paths:\n dataset = path.split('/')[0].strip()\n modelo = path.split('/')[-2].strip()\n nombre = dataset + '_' + modelo + '.npy'\n\n if not os.path.exists(dir_dest + dataset):\n os.mkdir(dir_dest + dataset)\n\n shutil.copyfile(dir_res + path, dir_dest + '/' + dataset + '/' + nombre)", "def save(self, path):\n individual = self.population.fittest_individual()\n order = [int(l) for l in individual.label_order]\n fitness = individual.fitness\n data = {'name': self.ds.name,\n 'num_labels': len(order),\n 'order': order,\n 'fitness': fitness\n }\n with open(path, 'w') as f:\n json.dump(data, f)", "def create_and_write_output(predictions_path,output_path,inpDir):\n \n filenames= sorted(os.listdir(predictions_path)) \n for filename in filenames:\n \n # read the 3 channel output image from the neural network\n image=cv2.imread(os.path.join(predictions_path,filename))\n \n # create binary image output using the create_binary function\n out_image=create_binary(image) \n \n # read and store the metadata from the input image\n with BioReader(os.path.join(inpDir,filename)) as br:\n metadata = br.metadata\n\n # Write the binary output consisting of the metadata using bfio.\n output_image_5channel=np.zeros((out_image.shape[0],out_image.shape[1],1,1,1),dtype=np.uint8)\n output_image_5channel[:,:,0,0,0]=out_image \n\n with BioWriter(os.path.join(output_path,filename), metadata=metadata) as bw:\n bw.dtype = output_image_5channel.dtype\n bw.write(output_image_5channel)", "def output_predictions(pipeline):\n ##### Write code here #######\n X_train, y_train_true = load_data_file(TRAIN_FILE)\n X_dev, y_dev_true = load_data_file(DEV_FILE)\n X_test, y_test_true = load_data_file(TEST_FILE)\n\n #train pipeline with dev and train file\n pipeline.fit(X=X_train, y=y_train_true)\n pipeline.fit(X=X_dev, y=y_dev_true)\n\n y_pred_test = pipeline.predict(X=X_test)\n\n df = pd.DataFrame(y_pred_test)\n with open('predictions.tsv', 'w'):\n df.to_csv('predictions.tsv', sep='\\t', index=False, header=False)\n ##### End of your work ######", "def predict_kaggle(test_path, file_list): \n return gennet.predict_kaggle(test_path, file_list, 'Resnet50')", "def save_result(save_path, npyfile):\n for i, item in enumerate(npyfile):\n img = item[:, :, 0]\n io.imsave(os.path.join(save_path, '%d_pred.png' % i), img)", "def write_to_file(writer, data):\n feature = {\n \"text\": _int64_feature(data)\n }\n tf_example = tf.train.Example(features=tf.train.Features(feature=feature))\n writer.write(tf_example.SerializeToString())", "def write_to_file(writer, data):\n feature = {\n \"text\": _int64_feature(data)\n }\n tf_example = tf.train.Example(features=tf.train.Features(feature=feature))\n writer.write(tf_example.SerializeToString())", "def write_to_file(test_data: pd.DataFrame, estimator: Estimator,\n file_name: str):\n\n # we only need x_test\n x_test, y_test = estimator.split_model_data(test_data)\n\n # have the estimator return its predictions\n predictions = estimator.forecast(x_test)\n\n if predictions is None:\n sys.stdout.write(f\"Estimator returned no forecast,\"\n f\" has it been trained?\\n\")\n raise RuntimeError\n\n # slice the timestamps from given dataframe\n timestamps = test_data.loc[:, test_data.columns[0]].to_numpy()\n timestamps = np.array(timestamps, dtype='datetime64[s]')\n\n with open(file_name, mode=\"w\", newline=\"\") as output_file:\n # initialize csv writer\n prediction_writer = csv.writer(output_file,\n 
delimiter=\",\",\n quotechar=\"'\",\n quoting=csv.QUOTE_MINIMAL)\n\n # add header line\n prediction_writer.writerow([test_data.columns[0],\n test_data.columns[1]])\n\n #\n for i in range(len(timestamps)):\n timestamp = str(timestamps[i])\n prediction = str(round(predictions[i], 6))\n prediction_writer.writerow([timestamp, prediction])\n\n return", "def write_predictions(estimator, vertical, source_website, target_website):\n score_dir_path = os.path.join(\n FLAGS.result_path, \"{}/{}-results/score\".format(vertical, source_website))\n\n tf.gfile.MakeDirs(score_dir_path)\n pred_filename = os.path.join(\n FLAGS.result_path,\n \"{}/{}-results/score/{}.preds.txt\".format(vertical, source_website,\n target_website))\n node_emb_filename = os.path.join(\n FLAGS.result_path,\n \"{}/{}-results/score/{}.node_emb.npz\".format(vertical, source_website,\n target_website))\n print(\"Writing predictions to file: %s\" % pred_filename, file=sys.stderr)\n golds_gen = model_util.joint_generator_fn(\n get_data_path(\n vertical=vertical, website=target_website, dev=False, goldmine=False),\n get_data_path(\n vertical=vertical, website=target_website, dev=False, goldmine=True),\n vertical,\n mode=\"all\")\n transfer_eval_input_function = functools.partial(\n model_util.joint_input_fn,\n get_data_path(\n vertical=vertical, website=target_website, dev=False, goldmine=False),\n get_data_path(\n vertical=vertical, website=target_website, dev=False, goldmine=True),\n vertical,\n mode=\"all\")\n preds_gen = estimator.predict(transfer_eval_input_function)\n prediction_str = \"\"\n if FLAGS.extract_node_emb:\n node_embs = []\n for gold, pred in zip(golds_gen, preds_gen):\n if FLAGS.circle_features:\n ((nnodes), (_), (words_list, words_len), (_, _), (_, _),\n (partner_words, _), (friend_words, _), (_, _), (_, _),\n (html_path, xpath_list), (_, _), (_, _), (_)), tags = gold\n\n for index in range(nnodes):\n normalized_partner = []\n for w in partner_words[index]:\n normalized_partner.append(normalize_text(w))\n\n if FLAGS.match_keywords:\n normalized_word = [\n normalize_text(w)\n for w in words_list[index][:words_len[index]]\n ]\n candicate_labels = constants.ATTRIBUTES[vertical]\n print(\"Partner: %s, Words: %s, Pred: %s\" %\n (\" \".join(normalized_partner), \" \".join(normalized_word),\n pred[\"tags\"][index]))\n normalized_partner = \" \".join(normalized_partner)\n for i, l in enumerate(candicate_labels):\n l = str(l).lower().replace(\"tor\", \"t\").split(\"_\")\n status = all([x in normalized_partner for x in l])\n if status:\n print(\"OLD:\", pred[\"tags\"][index])\n print(\"NEW:\", candicate_labels[i].encode())\n pred[\"tags\"][index] = candicate_labels[i].encode()\n\n if FLAGS.friend_encoder:\n normalized_friend = []\n for w in friend_words[index]:\n normalized_friend.append(normalize_text(w))\n print(normalized_friend)\n print(pred[\"friends_embs\"][index])\n\n else:\n ((nnodes), (words_list, words_len), (_, _), (_, _), (_, _),\n (html_path, xpath_list), (_, _), (_), (_)), tags = gold\n assert nnodes == len(words_list) == len(tags)\n for index in range(nnodes):\n s = \"\\t\".join([\n str(html_path, \"utf-8\"),\n str(xpath_list[index], \"utf-8\"),\n \" \".join([\n str(w, \"utf-8\") for w in words_list[index][:int(words_len[index])]\n ]),\n str(tags[index], \"utf-8\"),\n str(pred[\"tags\"][index], \"utf-8\"),\n \",\".join([str(score) for score in pred[\"raw_scores\"][index]]),\n ]) + \"\\n\"\n prediction_str += s\n if FLAGS.extract_node_emb:\n node_embs.append([float(i) for i in pred[\"node_embs\"][index]])\n\n 
with tf.gfile.Open(pred_filename, \"w\") as f:\n f.write(prediction_str)\n\n node_embs = np.array(node_embs)\n # Save np.array to file.\n with tf.gfile.Open(node_emb_filename, \"wb\") as gfo:\n print(\"Writing node emb pickle: %s\" % node_emb_filename, file=sys.stderr)\n pickle.dump(node_embs, gfo)\n print(\"Node Representation Save- done.\", file=sys.stderr)", "def write_file(self, file_path, ids, X_texts, y_probs, y_labels, verbose=False):\n frame_list = []\n for id_, X_text, y_label, y_probs in zip(ids, X_texts, y_labels, y_probs):\n if verbose:\n row = [id_, \" \".join(X_text), y_label] + list(y_probs)\n columns = [u\"id\", u\"text\", u\"label\"] + list(self.label_encoder.classes_)\n else:\n row = [id_] + list(y_probs)\n columns = [\"id\"] + list(self.label_encoder.classes_)\n frame_list.append(row)\n\n data_frame = pd.DataFrame(frame_list, columns=columns)\n\n logger.info(\"Writing predictions to file '%s'.\" % file_path)\n data_frame.to_csv(file_path, encoding=\"utf-8\", index=False, quoting=csv.QUOTE_NONNUMERIC)", "def _write_keypoint_results(keypoint_results, gt_folder, pred_folder):", "def predict(self, input_path: str, output_path: str):\n test_file = Dataset.read_dataset(input_path)\n with open(output_path, \"w\") as out_file:\n for line in tqdm(test_file):\n words_pred = self.prediction_generator(line)\n out_file.write(\"\".join(words_pred).strip() + \"\\n\")", "def output_predictions(predictions, df_symbol_date, file_name):\n\n # Retrieve baaged prediction\n pred_df = predictions['deep_bagged_predictions']\n\n # Create dataframe by resetting the index to allow columns to be concatenated\n output_df = pd.concat([df_symbol_date.reset_index(\n drop=True), pred_df.reset_index(drop=True)], axis=1)\n\n # Save output to file\n pred_file_location = './predictions/' + file_name + '.csv'\n print('Writing predictions to', pred_file_location)\n output_df.to_csv(pred_file_location)", "def _get_output_filename(dataset_dir):\n return os.path.join(dataset_dir, 'pokemon.tfrecord')", "def write_to_train_file(files: List, train_file_path: str) -> None:\n f = open(train_file_path, \"w\")\n text_to_save = \"\"\n for i, img_path in enumerate(files):\n img_path_stripped = img_path.replace(\"/darknet\", \"\")\n if i == len(files) - 1:\n text_to_save += img_path_stripped\n else:\n text_to_save += img_path_stripped + \"\\n\"\n\n f.write(text_to_save)\n f.close()", "def predict(self):\n train_vec, test_vec = self.get_tfidf_vectors()\n clf = self.get_classifier()\n\n print '-'*40\n print 'Making predictions ...'\n clf.fit(train_vec, self.train_ans)\n clf_predictions = clf.predict_proba(test_vec)\n\n print 'Storing predictions in', self.pred_file\n pred_out = [\"Id,predictions\"]\n num_pred = range(30)\n for fid, pred in zip(self.test_index, clf_predictions):\n top_rec = sorted(num_pred, key=lambda k: pred[k], reverse=True)[:3]\n pred_out.append(\"%s,%s\" % (fid, ' '.join( [clf.classes_[rec] for rec in top_rec] )))\n with open(self.pred_file, 'w') as f:\n f.write('%s\\n' % ('\\n'.join(pred_out)))", "def save_model(self):\n print(\"\\nModels are integrated to be multi scale.\\nSaving to disk.\")\n self.column_names = [ \"x_\" + str(x) for x in range(self.embedding.shape[1])]\n self.embedding = pd.DataFrame(self.embedding, columns = self.column_names)\n self.embedding.to_csv(self.args.output, index = None)", "def __create_uncertain_pairs_file(self):\n\n with open(os.getcwd() + \"/\" + self.training_file_name, \"w\") as fjson:\n json.dump(\n self.labeled_examples,\n fjson,\n default=self.to_json,\n 
tuple_as_array=False\n )", "def write_predictions(self, file: str, out_file: str, fee_only: bool = False):\n\n if not fee_only and self.network is None:\n raise Exception(\"No network found! Train or load a network.\")\n\n xs, ys = get_dataset([file])\n\n if not fee_only:\n dataset_iter = self.prepare_dataset(xs, ys, 1)\n predictions = self.network.predict(dataset_iter)\n prediction = iter(predictions)\n\n out_data = []\n sent_count = 0\n last_sentence = []\n\n for x in xs:\n if last_sentence != x[1:]:\n if not sent_count == 0:\n out_data.append(data_dict)\n\n data_dict = dict()\n data_dict[\"sentence\"] = x[1:]\n data_dict[\"sentence_id\"] = sent_count\n data_dict[\"prediction\"] = []\n last_sentence = x[1:]\n sent_count += 1\n frame_count = 0\n\n prediction_dict = dict()\n prediction_dict[\"id\"] = frame_count\n prediction_dict[\"fee\"] = x[0]\n if not fee_only:\n prediction_dict[\"frame\"] = self.output_field.vocab.itos[next(prediction).item()]\n\n data_dict[\"prediction\"].append(prediction_dict)\n\n frame_count += 1\n\n out_data.append(data_dict)\n\n with open(out_file, \"w\") as out:\n json.dump(out_data, out, indent=4)", "def __save_datasets(self):\n self.train.to_csv('{}/{}/{}'.format(path_to_train_set, img_format, 'train.csv'))\n self.valid.to_csv('{}/{}/{}'.format(path_to_valid_set, img_format, 'valid.csv'))\n self.test.to_csv('{}/{}/{}'.format(path_to_test_set, img_format, 'test.csv'))", "def save_training(self):\n\n filename = str(hashlib.sha1(str(self.training_data).encode(\"utf-8\"))\n .hexdigest())\n path = \"./training/\" + filename + \".json\"\n\n data = {\n \"states\": self.states,\n \"transitions\": self.transitions,\n \"matrix\": self.matrix.tolist()\n }\n\n with open(path, \"w\") as outfile:\n json.dump(data, outfile)", "def predict(self, X, path):\n\t\tclassifier = self.classifier.fit(self.X, self.y)\n\t\ty_pred = pd.DataFrame(classifier.predict(X), columns=['prediction'])\n\t\ty_prob = pd.DataFrame(np.around(classifier.predict_proba(X), decimals=4), columns=classifier.classes_)\n\t\toutput = pd.concat([X, y_pred, y_prob], axis=1)\n\t\toutput.to_csv(path, sep=',', index=False) \t\t# save to file", "def predict(input_path, model_path, output_path):\n\n logger = logging.getLogger(__name__)\n\n logger.info(\"Loading input dataset\")\n X_pred = pd.read_csv(input_path)\n\n logger.info(\"Loading model\")\n model = joblib.load(model_path)\n\n logger.info(\"Generating predictions\")\n predictions = model.predict(X_pred)\n prediction_df = pd.DataFrame({\"predictions\": predictions})\n\n logger.info(f\"Writing output to {output_path}\")\n output_dir = Path(output_path).parent\n output_dir.mkdir(parents=True, exist_ok=True)\n prediction_df.to_csv(output_path, index=False)", "def save_to_disk(self, file_name = \"vehicle_classifier.pkl\"):\n self.classifier.save_to_disk(file_name)", "def write_model_results(model, input_file, repr, tags, outpath):\n input, input_data = read_input(input_file)\n\n if repr == \"c\":\n x = utils.get_features(input, ixs=3)\n else:\n x = utils.get_features(input, chars=True)\n\n w_batcher = utils.AutoBatcher(x, x, batch_size=1, shuffle=False)\n labels = []\n for inputs, _ in w_batcher.get_batches():\n output = torch.max(model(inputs), 1)[1]\n labels += output.cpu().data.numpy().tolist()\n\n predictions = utils.NEWLINE.join([\"{} {}\".format(input_data[i], tags[labels[i]])\\\n for i in range(len(input_data))])\n with open(outpath, \"w\") as outfile:\n outfile.write(predictions)", "def write_results(file_path, predictions):\n with open(file_path, \"w\") 
as csv_file:\n writer = csv.writer(csv_file, delimiter=\",\")\n writer.writerow([\"Id\", \"Bound\"])\n for id, bound in enumerate(predictions):\n writer.writerow([id, bound])", "def export_prediction_to_example(filename, pred_geo, pred_sem):\n with tf.python_io.TFRecordWriter(filename) as writer:\n out_feature = {\n 'prediction_df/dim': util.int64_feature(pred_geo.shape),\n 'prediction_df': util.float_feature(pred_geo.flatten().tolist())\n }\n if FLAGS.predict_semantics:\n out_feature['prediction_sem'] = util.bytes_feature(\n pred_sem.flatten().tobytes())\n example = tf.train.Example(features=tf.train.Features(feature=out_feature))\n writer.write(example.SerializeToString())", "def make_predictions(self, metrics: dict) -> None:\n\n y_pred = self.model.predict(self.X_test)\n predictions = pd.DataFrame(\n {self.title: self.titles_test, \"y_pred\": y_pred, \"y_true\": self.y_test}\n )\n file_name = f'../predictions/regression/predictions_{self.model.__class__.__name__}_{datetime.now().strftime(\"%Y_%m_%d_%H_%M_%S\")}'\n os.makedirs(\n os.path.dirname(file_name), exist_ok=True\n ) # Ensure the directory exists\n predictions.to_csv(file_name + \".csv\", index=False)\n with open(file_name + \"_metrics.txt\", \"w\") as f:\n f.write(f\"Metrics: {metrics}\")\n logging.info(\"Predictions saved to file.\")", "def save(self, model_name = 'mr-senti'):\n\n\t\tjoblib.dump(self.classifier, os.path.join('model', model_name + '.pkl'))", "def submit_predictions(\n sub_name: str, predictions: jnp.ndarray, id_col: jnp.array\n):\n with open(os.path.join(\"data\", sub_name), \"w\") as sub_file:\n sub_file.write(\"Id,SalePrice\\n\")\n for (example_id, pred) in zip(id_col, jnp.squeeze(predictions)):\n sub_file.write(f\"{example_id},{pred}\\n\")", "def to_file(self, fn):\n store.store_dict(fn, 'trainalgorithm', self.to_dict())", "def but_pred(self):\n if not self.nn_obj and not self.path:\n tk.messagebox.showerror(\"Error\", \"Open file and create NN\")\n return\n elif not self.nn_obj:\n tk.messagebox.showerror(\"Error\", \"Create NN\")\n return\n elif not self.path:\n tk.messagebox.showerror(\"Error\", \"Open file first\")\n return\n out=pred(self.nn_obj, self.nn_in)\n if platform == \"linux\" or platform == \"linux2\":\n path=tk.filedialog.asksaveasfilename(filetypes = [('Prediction file','.txt')])\n elif platform == \"win32\":\n path=tk.filedialog.asksaveasfilename(filetypes = [('Prediction file','.txt')], defaultextension=\"*.*\")\n else:\n path=tk.filedialog.asksaveasfilename(filetypes = [('Prediction file','.txt')])\n np.savetxt(path, np.c_[np.array(self.nn_in), out], fmt='%1.3f')", "def save_features_to_file(self):\n if not os.path.exists(self.features_save_path):\n os.makedirs(self.features_save_path)\n for s in self.sets:\n self.save_features_to_file_by_set(s)", "def save_file(self, filename):\n if self.t3data:\n np.savetxt(filename, self.t3data)\n else:\n self.export_to_ascii()", "def save_ml_output(arrays, out_fname, force):\n if not force:\n if os.path.isfile(out_fname):\n return\n try:\n os.makedirs(os.path.dirname(out_fname))\n except FileExistsError:\n pass\n np.save(out_fname, arrays, allow_pickle=False)", "def files():\r\n fn=pd.read_csv(request.files.get('fnm'))\r\n scaling = scaler.transform(fn)\r\n prediction = classifier.predict(scaling)\r\n return 'Predictions'+ str(list(prediction))", "def create_submission_file(classifiers, preprocessor, batch_size, classification_threshold=0.2):\r\n x_test_filename = preprocessor.X_test\r\n steps = len(x_test_filename) / batch_size\r\n y_map = 
preprocessor.y_map\r\n predictions = None\r\n for classifier in classifiers:\r\n test_gen = preprocessor.get_prediction_generator(batch_size)\r\n predictions_tmp = classifier.predict_gen(test_gen, steps)\r\n if predictions is None:\r\n predictions = predictions_tmp\r\n else:\r\n predictions += predictions_tmp\r\n\r\n predictions = predictions / len(classifiers)\r\n logger.info(\"Predictions shape: {}\\nFiles name shape: {}\\n1st predictions entry:\\n{}\".format(predictions.shape,\r\n x_test_filename.shape,\r\n predictions[0]))\r\n\r\n thresholds = [classification_threshold] * len(y_map)\r\n\r\n predicted_labels = classifier.map_predictions(predictions, y_map, thresholds)\r\n\r\n # Finally lets assemble and visualize our prediction for the test dataset\r\n tags_list = [None] * len(predicted_labels)\r\n for i, tags in enumerate(predicted_labels):\r\n tags_list[i] = ' '.join(map(str, tags))\r\n\r\n final_data = [[filename.split(\".\")[0], tags] for filename, tags in zip(x_test_filename, tags_list)]\r\n\r\n final_df = pd.DataFrame(final_data, columns=['image_name', 'tags'])\r\n\r\n # And save it to a submission file\r\n final_df.to_csv('../submission_file.csv', index=False)\r\n classifier.close()\r\n return None", "def save(self, filename):\n ext = os.path.splitext(filename)[1]\n if ext == '.pkl':\n print 'saving trainer params to a pkl file'\n self.save_pkl(filename)\n else:\n print 'saving trainer params to a hdf5 file'\n self.save_h5(filename)", "def save_predicted_results(predicted_results):\n # Save the model\n with open(\"predicted_results\", \"wb\") as predicted_results_file:\n pickle.dump(predicted_results, predicted_results_file)", "def save_model(self):\n self.pred_net.save((self.save_path / \"iqn_pred_net\").absolute().as_posix())\n self.target_net.save((self.save_path / \"iqn_target_net\").absolute().as_posix())", "def write_file(country, season, final, var):\n if var=='label':\n path='../results/kmeans/'\n elif var=='cluster':\n path='../results/sequence_analysis/'\n country_ = country.lower()\n season_ = season.replace('-','_')\n file_name=country_+\"_\"+season_\n newpath=path+file_name+'/'\n if not os.path.exists(newpath):\n os.makedirs(newpath)\n f = open(newpath+file_name+\".txt\",\"w\") \n f.write(final)\n f.close()", "def save_data(data_dir):\r\n for k in range(1,11):\r\n fold_name = 'fold' + str(k)\r\n print \"Saving\" + fold_name\r\n features, labels = process_audio(parent_path, [fold_name])\r\n labels = encode(labels)\r\n print \"Features of\", fold_name , \" = \", features.shape\r\n print \"Labels of\", fold_name , \" = \", labels.shape\r\n feature_file = os.path.join(data_dir, fold_name + '_x.npy')\r\n labels_file = os.path.join(data_dir, fold_name + '_y.npy')\r\n np.save(feature_file, features)\r\n print \"Saved \" + feature_file\r\n np.save(labels_file, labels)\r\n print \"Saved \" + labels_file", "def create_train(train_img_path):\n\n f = open(\"train.txt\", \"w+\")\n for subdirs, dirs, files in os.walk(train_img_path):\n for filename in files:\n if filename.endswith(\".jpg\"):\n train_image_path = os.path.join(train_img_path, filename)\n print(train_image_path)\n f.write(train_image_path + \"\\n\")\n f.close()", "def write_label_file(labels_to_class_names, dataset_dir, filename='labels.txt'):\n labels_filename = os.path.join(dataset_dir, filename)\n with tf.gfile.Open(labels_filename, 'w') as f:\n for label in labels_to_class_names:\n class_name = labels_to_class_names[label]\n f.write('%d:%s\\n' % (label, class_name))", "def savePredictedLenses(des_names_array, 
predicted_class_probabilities, predicted_lenses_filepath, text_file_path):\n if not os.path.exists(predicted_lenses_filepath):\n os.mkdir('%s/' % predicted_lenses_filepath)\n text_file = open('%s' % text_file_path, \"a+\")\n text_file.write('Predicted Lenses: \\n')\n for lens_index in range(len(predicted_class_probabilities)):\n if predicted_class_probabilities[lens_index] == 1:\n text_file.write(\"%s \\n \" % des_names_array[lens_index])\n\n text_file.write('\\n')\n text_file.write('\\n')\n\n text_file.write('No Lenses Predicted: \\n')\n for lens_index in range(len(predicted_class_probabilities)):\n if predicted_class_probabilities[lens_index] == 0:\n text_file.write(\"%s \\n \" % des_names_array[lens_index])\n text_file.close()", "def predict(classifier, data):\n print(\"Beggining to classify data\")\n results = classifier.predict(data)\n results = pd.DataFrame(results)\n results.index += 1\n results.to_csv(\"out/results.csv\", header=[\"Label\"], index=True, index_label=[\"ImageId\"])\n print(\"Finished classifying data\")", "def save_predictions(path: str, wrapper, results: Dict):\n predictions_with_idx = []\n\n if wrapper.task_helper and wrapper.task_helper.output:\n predictions_with_idx = wrapper.task_helper.output\n else:\n inv_label_map = {idx: label for label,\n idx in wrapper.label_map.items()}\n for idx, prediction_idx in zip(results['indices'], results['predictions']):\n prediction = inv_label_map[prediction_idx]\n idx = idx.tolist() if isinstance(idx, np.ndarray) else int(idx)\n predictions_with_idx.append({'idx': idx, 'label': prediction})\n\n with open(path, 'w', encoding='utf8') as fh:\n for line in predictions_with_idx:\n fh.write(json.dumps(line) + '\\n')", "def save_processed_dataset(name, df):\n assert name in VALID_NAMES, 'Invalid data set requested. 
Please make sure name is one of ' + ', '.join(\n VALID_NAMES) + '.'\n path = os.path.join('downloads', name)\n path_processed = os.path.join(path, 'processed')\n\n if name == 'iris':\n df.to_csv(os.path.join(path_processed, 'iris.csv'), index=False)\n return\n\n elif name == 'wine':\n df.to_csv(os.path.join(path_processed, 'wine.csv'), index=False)\n return\n\n elif name == 'titanic':\n df.to_csv(os.path.join(path_processed, 'titanic.csv'), index=False)\n return\n\n elif name == 'lanl':\n with open(os.path.join(path_processed, 'train_data.pkl'), 'wb') as f:\n pkl.dump(df, f)\n with open(os.path.join(path_processed, 'train_targets.pkl'), 'wb') as f:\n pkl.dump(df, f)\n return\n\n elif name == 'MNIST' or name == 'FashionMNIST':\n print('Already handled in notebook.')\n return", "def save(self,filename):\n f = open(filename,'w')\n f.write('Test results for %s v%s\\n' % (self.description,self.version))\n f.write('Series ran by %s\\n\\n' % self.person_name)\n for result in self.values():\n f.write('%-70s : %s\\n' % (result.id,result.outcome))\n if result.outcome != Result.PASS:\n for (kind, annotation) in result.annotations.items():\n f.write('%s:\\n%s\\n' % (kind, as_utf8(annotation)))\n f.write('\\n')\n f.write('\\n\\nPasses: %i\\n' % self.get_pass_count())\n f.write('Fails: %i\\n' % self.get_fail_count())\n f.write('Errors: %i\\n' % self.get_error_count())\n f.write('Untested: %i\\n' % self.get_untested_count())\n f.write('Skipped: %i\\n' % self.get_skipped_count())\n f.close()", "def dump(pred_out_path, xyz_pred_list, verts_pred_list):\n # make sure its only lists\n xyz_pred_list = [x.tolist() for x in xyz_pred_list]\n verts_pred_list = [x.tolist() for x in verts_pred_list]\n #import pdb; pdb.set_trace()\n # save to a json\n with open(pred_out_path, 'w') as fo:\n json.dump(\n [\n xyz_pred_list,\n verts_pred_list\n ], fo)\n print('Dumped %d joints and %d verts predictions to %s' % (len(xyz_pred_list), len(verts_pred_list), pred_out_path))", "def write_predictions(all_examples, all_features, all_results, n_best_size,\n max_answer_length, do_lower_case, output_prediction_file,\n output_nbest_file, verbose_logging):\n logger.info(\"Writing predictions to: %s\" % (output_prediction_file))\n logger.info(\"Writing nbest to: %s\" % (output_nbest_file))\n\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature.example_index].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"PrelimPrediction\",\n [\"feature_index\", \"start_index\", \"end_index\", \"start_logit\", \"end_logit\", \"label_logit\"])\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n for (example_index, example) in enumerate(all_examples):\n features = example_index_to_features[example_index]\n\n prelim_predictions = []\n for (feature_index, feature) in enumerate(features):\n result = unique_id_to_result[feature.unique_id]\n\n start_indexes = _get_best_indexes(result.start_logits, n_best_size)\n end_indexes = _get_best_indexes(result.end_logits, n_best_size)\n for start_index in start_indexes:\n for end_index in end_indexes:\n # We could hypothetically create invalid predictions, e.g., predict\n # that the start of the span is in the question. 
We throw out all\n # invalid predictions.\n if start_index >= len(feature.tokens):\n continue\n if end_index >= len(feature.tokens):\n continue\n if start_index not in feature.token_to_orig_map:\n continue\n if end_index not in feature.token_to_orig_map:\n continue\n if not feature.token_is_max_context.get(start_index, False):\n continue\n if end_index < start_index:\n continue\n length = end_index - start_index + 1\n if length > max_answer_length:\n continue\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=feature_index,\n start_index=start_index,\n end_index=end_index,\n start_logit=result.start_logits[start_index],\n end_logit=result.end_logits[end_index],\n label_logit=result.label_logits))\n\n prelim_predictions = sorted(\n prelim_predictions,\n key=lambda x: (x.start_logit + x.end_logit),\n reverse=True)\n\n _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"NbestPrediction\", [\"text\", \"start_logit\", \"end_logit\", \"label_logit\"])\n\n seen_predictions = {}\n nbest = []\n for pred in prelim_predictions:\n if len(nbest) >= n_best_size:\n break\n feature = features[pred.feature_index]\n\n tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]\n orig_doc_start = feature.token_to_orig_map[pred.start_index]\n orig_doc_end = feature.token_to_orig_map[pred.end_index]\n orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]\n tok_text = \" \".join(tok_tokens)\n\n # De-tokenize WordPieces that have been split off.\n tok_text = tok_text.replace(\" ##\", \"\")\n tok_text = tok_text.replace(\"##\", \"\")\n\n # Clean whitespace\n tok_text = tok_text.strip()\n tok_text = \" \".join(tok_text.split())\n orig_text = \" \".join(orig_tokens)\n\n final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)\n if final_text in seen_predictions:\n continue\n\n seen_predictions[final_text] = True\n nbest.append(\n _NbestPrediction(\n text=final_text,\n start_logit=pred.start_logit,\n end_logit=pred.end_logit,\n label_logit=pred.label_logit))\n\n # In very rare edge cases we could have no valid predictions. 
So we\n # just create a nonce prediction in this case to avoid failure.\n if not nbest:\n nbest.append(\n _NbestPrediction(text=\"empty\", start_logit=0.0, end_logit=0.0, label_logit=0.0))\n\n assert len(nbest) >= 1\n\n total_scores = []\n for entry in nbest:\n total_scores.append(entry.start_logit + entry.end_logit)\n\n probs = _compute_softmax(total_scores)\n\n nbest_json = []\n for (i, entry) in enumerate(nbest):\n output = collections.OrderedDict()\n output[\"text\"] = entry.text\n output[\"probability\"] = probs[i]\n output[\"start_logit\"] = entry.start_logit\n output[\"end_logit\"] = entry.end_logit\n output[\"label_logit\"] = entry.label_logit\n nbest_json.append(output)\n\n assert len(nbest_json) >= 1\n\n if nbest_json[0][\"label_logit\"] and (example.qas_id not in all_predictions.keys() or all_nbest_json[example.qas_id][0][\"probability\"] < nbest_json[0][\"probability\"]):\n all_predictions[example.qas_id] = nbest_json[0][\"text\"]\n all_nbest_json[example.qas_id] = nbest_json\n\n with open(output_prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=4) + \"\\n\")\n\n with open(output_nbest_file, \"w\") as writer:\n writer.write(json.dumps(all_nbest_json, indent=4) + \"\\n\")", "def save2file(self):\n ids_input = []\n labels_input = []\n ids_path = os.path.join(self.path, 'ids')\n if not os.path.exists(ids_path):\n os.makedirs(ids_path)\n labels_path = os.path.join(self.path, 'labels')\n if not os.path.exists(labels_path):\n os.makedirs(labels_path)\n ids_total = len(self.test)\n for i in range(ids_total):\n ids_input = self.test[i][0]\n labels_input = self.test[i][1]\n file_name = \"ids/\" + str(i) + \".bin\"\n file_path = os.path.join(self.path, file_name)\n np.array(ids_input, dtype=np.int32).tofile(file_path)\n file_name = \"labels/\" + str(i) + \".bin\"\n file_path = os.path.join(self.path, file_name)\n np.array(labels_input, dtype=np.int32).tofile(file_path)\n print(\"\\n ****** Success! 
******\\n \")", "def _get_output_filename(dataset_dir, split_name):\n return '%s/cifar100_%s.tfrecord' % (dataset_dir, split_name)", "def write_labels():\n with open('../data/labels.txt', 'w') as labels_file:\n labels = generate_labels()\n labels_file.write('\\n'.join(labels))", "def export_data(self):\n folder = os.path.dirname(self.filename[0])\n filename_ext = os.path.basename(self.filename[0])\n filename = os.path.splitext(filename_ext)[0] #get filename without extension\n\n path = folder + \"/\" + filename + \"_fit_results.txt\"\n if not os.path.exists(path):\n file = open(path, \"w+\")\n else:\n file = open(path, \"a+\")\n\n for i in range(len(self.data_list)):\n file.write(self.data_list[i] + \"\\n\\n\")\n\n self.data_list = []\n file.close()", "def write_predictions_to_s3(self, fold_predictions: pd.DataFrame, output_path: str):\n \n # prepare dataframe\n prediction_columns = fold_predictions.columns[['prediction_' == x[:11] for x in fold_predictions.columns]].tolist()\n fold_predictions = fold_predictions[fold_predictions.train_or_test == 'test'] # only save test set\n fold_predictions = fold_predictions[['sf_account_id'] + prediction_columns] # only save salesforce ID and prediction columns\n fold_predictions.columns = ['sf_account_id'] + [x[11:] for x in prediction_columns] # remove predicted_ from column names\n \n # write to S3\n\n now_timestamp = str(pd.Timestamp.now()).split(\".\")[0]\n output_object = f'{output_path}propensity_{now_timestamp}.csv'\n csv_string = fold_predictions.to_csv(index=False)\n\n if 's3' in output_path:\n fs = s3fs.S3FileSystem()\n with fs.open(output_object, 'wb') as f:\n f.write(csv_string.encode())\n else:\n with open(output_object, 'wb') as f:\n f.write(csv_string.encode())\n\n return output_object", "def write_label_file(labels_to_class_names, labels_filename):\n with tf.gfile.Open(labels_filename, \"w\") as f:\n for label in labels_to_class_names:\n class_name = labels_to_class_names[label]\n f.write('%d:%s\\n'%(label, class_name))", "def _get_output_filename(dataset_dir, split_name):\n return '%s/fer_%s.tfrecord' % (dataset_dir, split_name)", "def fetch_training_data(filename, output, db_url=None):\n r2dt.write_training_data(filename, db_url, output)", "def writePredictions(outfile, pred, proba, y, data, evalmode=False):\n if evalmode:\n header = ['chr', 'start', 'end', 'prediction', 'true label']\n for i in range(np.shape(proba)[1]):\n header.append(\"probability:\"+str(i))\n pd.DataFrame(np.concatenate((data.values[:,0:3],np.transpose(pred[np.newaxis]).astype(int),np.transpose(y[np.newaxis]), proba), axis=1)[:,:]).to_csv(outfile, sep=\"\\t\", index=None, header=header)\n else:\n header = ['chr', 'start', 'end', 'prediction']\n for i in range(np.shape(proba)[1]):\n header.append(\"probability:\"+str(i))\n pd.DataFrame(np.concatenate((data.values[:,0:3],np.transpose(pred[np.newaxis]).astype(int), proba), axis=1)[:,:]).to_csv(outfile, sep=\"\\t\", index=None, header=header)", "def batch_predict(args, ilastik_args):\n # Create the folder for the intermediate results.\n if not os.path.isdir(args.cache):\n os.makedirs(args.cache)\n\n # Find the random forest files.\n rf_files = autocontext_forests(args.batch_predict)\n n = len(rf_files)\n\n # Get the output format arguments.\n default_output_format = \"hdf5\"\n default_output_filename_format = os.path.join(args.cache, \"{nickname}_probs.h5\")\n ilastik_parser = argparse.ArgumentParser()\n ilastik_parser.add_argument(\"--output_format\", type=str, default=default_output_format)\n 
ilastik_parser.add_argument(\"--output_filename_format\", type=str, default=default_output_filename_format)\n ilastik_parser.add_argument(\"--output_internal_path\", type=str, default=default_export_key())\n format_args, ilastik_args = ilastik_parser.parse_known_args(ilastik_args)\n output_formats = [default_output_format] * (n-1) + [format_args.output_format]\n if args.no_overwrite:\n output_filename_formats = [default_output_filename_format[:-3] + \"_%s\" % str(i).zfill(2) + default_output_filename_format[-3:] for i in xrange(n-1)] + [format_args.output_filename_format]\n else:\n output_filename_formats = [default_output_filename_format] * (n-1) + [format_args.output_filename_format]\n output_internal_paths = [default_export_key()] * (n-1) + [format_args.output_internal_path]\n\n # Reshape the data to tzyxc and move it to the cache folder.\n outfiles = []\n keep_channels = None\n for i in xrange(len(args.files)):\n # Read the data and attach axistags.\n filename = args.files[i]\n if \".h5/\" in filename or \".hdf5/\" in filename:\n data_key = os.path.basename(filename)\n data_path = filename[:-len(data_key)-1]\n data = vigra.readHDF5(data_path, data_key)\n else:\n data_key = default_export_key()\n data_path_base, data_path_ext = os.path.splitext(filename)\n data_path = data_path_base + \".h5\"\n data = vigra.readImage(filename)\n if not hasattr(data, \"axistags\"):\n default_tags = {1: \"x\",\n 2: \"xy\",\n 3: \"xyz\",\n 4: \"xyzc\",\n 5: \"txyzc\"}\n data = vigra.VigraArray(data, axistags=vigra.defaultAxistags(default_tags[len(data.shape)]),\n dtype=data.dtype)\n new_data = reshape_tzyxc(data)\n\n if i == 0:\n c_index = new_data.axistags.index(\"c\")\n keep_channels = new_data.shape[c_index]\n\n # Save the reshaped dataset.\n output_filename = os.path.split(data_path)[1]\n output_filename = os.path.join(args.cache, output_filename)\n vigra.writeHDF5(new_data, output_filename, data_key, compression=args.compression)\n args.files[i] = output_filename + \"/\" + data_key\n if args.no_overwrite:\n outfiles.append([os.path.splitext(output_filename)[0] + \"_probs_%s.h5\" % str(i).zfill(2) for i in xrange(n-1)])\n else:\n outfiles.append([os.path.splitext(output_filename)[0] + \"_probs.h5\"] * (n-1))\n assert keep_channels > 0\n\n # Run the batch prediction.\n for i in xrange(n):\n rf_file = rf_files[i]\n output_format = output_formats[i]\n output_filename_format = output_filename_formats[i]\n output_internal_path = output_internal_paths[i]\n\n filename_key = os.path.basename(args.files[0])\n filename_path = args.files[0][:-len(filename_key)-1]\n\n # Quick hack to prevent the ilastik error \"wrong number of channels\".\n p = ILP(rf_file, args.cache, compression=args.compression)\n for j in xrange(p.data_count):\n p.set_data_path_key(j, filename_path, filename_key)\n\n # Call ilastik to run the batch prediction.\n cmd = [args.ilastik,\n \"--headless\",\n \"--project=%s\" % rf_file,\n \"--output_format=%s\" % output_format,\n \"--output_filename_format=%s\" % output_filename_format,\n \"--output_internal_path=%s\" % output_internal_path]\n\n if args.predict_file:\n pfile = os.path.join(args.cache, \"predict_file.txt\")\n with open(pfile, \"w\") as f:\n for pf in args.files:\n f.write(os.path.abspath(pf) + \"\\n\")\n cmd.append(\"--predict_file=%s\" % pfile)\n else:\n cmd += args.files\n\n print col.Fore.GREEN + \"- Running autocontext batch prediction round %d of %d -\" % (i+1, n) + col.Fore.RESET\n subprocess.call(cmd, stdout=sys.stdout)\n\n if i < n-1:\n # Merge the probabilities back to the 
original file.\n for filename, filename_out in zip(args.files, outfiles):\n filename_key = os.path.basename(filename)\n filename_path = filename[:-len(filename_key)-1]\n merge_datasets(filename_path, filename_key, filename_out[i], output_internal_path, n=keep_channels,\n compression=args.compression)", "def pred_dir(self, x, debug = False):\n # Create prediction directory\n logger.info(f'Create directory: {self.test_pred_path}')\n os.makedirs(self.test_pred_path, exist_ok=True)\n \n # Predict on all images in directory\n images = os.listdir(self.test_path)\n logger.info(f'Predict on images in {self.test_path}, n_images = {len(images)}')\n for im_name in images:\n im_path = os.path.join(self.test_path, im_name)\n im_save_path = os.path.join(self.test_pred_path, im_name)\n \n image = Image.open(im_path)\n #image = image.convert('L').convert('RGB')\n \n logger.info(f'Path: {im_path}')\n \n pred_dict = self.predict({'output': image}, debug = True)\n image_pred = pred_dict[0]['image']\n image_pred = np.array(image_pred)\n #image_pred = image_pred[:, :, ::-1]\n \n logger.info(f'Save to: {im_save_path}')\n \n # Save prediction as tif\n cv2.imwrite(im_save_path[:-4] + '.tif', image_pred)", "def predict_labels(model):\n test_datagen = ImageDataGenerator(featurewise_center=True,\n featurewise_std_normalization=True\n #rescale=1. / 255,\n #samplewise_center=True,\n #samplewise_std_normalization=True\n )\n test_datagen.fit(test_data)\n # datagen.fit(val_data)\n # create generator for train data\n test_generator = test_datagen.flow(\n test_data,\n batch_size=batch_size,\n shuffle=False)\n pred_prob=model.predict_generator(test_generator,test_data.shape[0])\n pred_prob=pred_prob[:,0]\n def pre_class(x):\n \tif x<0.5:\n return 0\n else:\n return 1\n #def true_label(id):\n #\tif 'f0' in id:\n #\t return 0\n # elif 'f1' in id: \n # return 1\n #\telse:\n #\t pass\n #pred_true=map(true_label,test_id)\n #pred_true=np.array(pred_true)\n #print roc_auc_score(val_target, pred_prob)\n #prediction=map(pre_class,pred_prob)\n #print confusion_matrix(val_target,prediction)\n with open(\"prediction.csv\", \"w\") as f: \n\tp_writer = csv.writer(f, delimiter=',', lineterminator='\\n')\n for id,label in zip(test_id,pred_prob):\n\t p_writer.writerow([id, label])\n\t\n #base_path = \"PZ/test/test/\"\n\n #with open(\"prediction.csv\", \"w\") as f:\n # p_writer = csv.writer(f, delimiter=',', lineterminator='\\n')\n # for _, _, imgs in os.walk(base_path):\n # for im in imgs:\n # pic_id = im.split(\".\")[0]\n #img = cv2.imread(base_path+im)\n #img = cv2.resize(img, (img_width, img_height), cv2.INTER_LINEAR)\n #img = img.transpose((2,0,1))\n #img = np.expand_dims(img,axis=0)\n #img = load_img(base_path + im)\n #img = imresize(img, size=(img_height, img_width))\n #test_x = img_to_array(img).reshape(3, img_height, img_width)\n #test_x = test_x.reshape((1,) + test_x.shape)\n #test_datagen.fit(img)\n #test_generator = test_datagen.flow(img,\n # batch_size=1,\n # shuffle=False)\n #prediction = model.predict_generator(test_generator, 1)\n #p_writer.writerow([pic_id, prediction])" ]
[ "0.79836786", "0.7524809", "0.7184881", "0.7017586", "0.7017586", "0.7015609", "0.6925001", "0.68141526", "0.67422944", "0.6684592", "0.6629441", "0.6625324", "0.66196924", "0.66170734", "0.6532637", "0.65058", "0.65011615", "0.64889866", "0.64260775", "0.63849616", "0.6359895", "0.63512397", "0.6345937", "0.63078797", "0.63025934", "0.62755555", "0.6272432", "0.62519073", "0.62505454", "0.62501913", "0.6247042", "0.6162269", "0.6142979", "0.61404383", "0.6127851", "0.6125878", "0.6118618", "0.6111454", "0.60951596", "0.6093843", "0.60835826", "0.60724396", "0.6067176", "0.6067176", "0.605072", "0.60405415", "0.60226643", "0.60164404", "0.59980124", "0.59960043", "0.5992555", "0.59866196", "0.59688896", "0.5959342", "0.59269506", "0.591915", "0.58924353", "0.588753", "0.5886559", "0.58680534", "0.5863318", "0.5859901", "0.5854515", "0.5853986", "0.58517176", "0.584397", "0.5835076", "0.58296883", "0.582795", "0.58251995", "0.5822151", "0.5820707", "0.5820353", "0.5818115", "0.58180743", "0.58124566", "0.5810917", "0.5796917", "0.5794237", "0.5785006", "0.57846934", "0.57805187", "0.5767374", "0.57616264", "0.57538193", "0.5745741", "0.5745739", "0.57441676", "0.5743082", "0.57395333", "0.57339704", "0.572445", "0.5715666", "0.57147855", "0.57137716", "0.5704884", "0.5695789", "0.5693914", "0.56925017", "0.5689202" ]
0.6097874
38
Return RMSLE from the prediction and the expected answer.
def get_RMSLE(pred, truth):
    assert len(pred) == len(truth)
    diff_vect = np.log(pred + 1) - np.log(truth + 1)
    diff_sum = np.sum(np.power(diff_vect, 2))
    return np.sqrt(diff_sum / len(pred))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rmsle(actual, predicted, *args, **kwargs):\n return np.sqrt(msle(actual, predicted))", "def RMSLE(prediction, real):\n logarithmic_error = np.log1p(prediction) - np.log1p(real)\n score = np.sqrt(1/len(real) *np.sum(logarithmic_error**2))\n return score", "def rmsle(y_true, y_pred):\n assert y_true.shape == y_pred.shape, \\\n ValueError(\"Mismatched dimensions between input vectors: {}, {}\".format(y_true.shape, y_pred.shape))\n return np.sqrt((1/len(y_true)) * np.sum(np.power(np.log(y_true + 1) - np.log(y_pred + 1), 2)))", "def rmsle(self) -> float:\n return float(np.sqrt(np.mean(np.power(np.log1p(self.predicted) - np.log1p(self.true), 2))))", "def msle(actual, predicted):\n return np.mean(sle(actual, predicted))", "def compare_rmse(x_true, x_pred):\n x_true, x_pred = x_true.astype(np.float32), x_pred.astype(np.float32)\n return np.linalg.norm(x_true - x_pred) / (np.sqrt(x_true.shape[0] * x_true.shape[1] * x_true.shape[2]))", "def calc_rmsle(y: np.ndarray, y_hat: np.ndarray) -> float:\n pass", "def reserrorcalc(test_set, model):\n # Extracting X\n X = test_set[:,:-1]\n\n # Extracting labels\n Y = test_set[:,-1]\n residual_err = sum((model.predict(X) - Y) ** 2)\n return residual_err", "def compute_RMSE(true_val, predicted_val, p_output=True) -> float:\n from sklearn.metrics import mean_squared_error\n rms = np.sqrt(mean_squared_error(np.array(true_val), predicted_val))\n if p_output:\n print('RMSE: {0}'.format(rms))\n return rms", "def sle(actual, predicted):\n return (np.power(np.log(np.array(actual) + 1) -\n np.log(np.array(predicted) + 1), 2))", "def msle(self, weights=None) -> float:\n return float(np.average((np.log1p(self.true) - np.log1p(self.predicted)) ** 2, axis=0, weights=weights))", "def rmspe(self) -> float:\n return float(np.sqrt(np.mean(np.square(((self.true - self.predicted) / self.true)), axis=0)))", "def rmsError(self, yTrue, yPred):\n if len(yPred) != len(yTrue):\n raise ValueError(\"Lengths of predicted and actual values doesn't match.\")\n\n noneCount = 0\n loss = 0\n for i in range(len(yTrue)):\n if yPred[i] == None:\n noneCount+=1\n else:\n loss += (yTrue[i] - yPred[i])**2\n loss = 0.5 * loss/len(yTrue)-noneCount\n return round(math.sqrt(loss), 2)", "def rmsle_cv(model, dataset,y):\r\n kf = KFold(n_folds, shuffle=True, random_state=42).get_n_splits(dataset)\r\n rmse= np.log(-cross_val_score(model, dataset, y, scoring=\"neg_mean_absolute_error\", cv = kf))\r\n return(rmse)", "def computeRmse(model, data, n):\n print \"RESULT_data:%s \" % ((data.map(lambda x: (x[0], x[1]))).take(50))\n predictions1 = model.predictAll(data.map(lambda x: (x[0], x[1])))\n print \"RESULT1: %s\" % predictions1\n predictionsAndRatings = predictions1.map(lambda x: ((x[0], x[1]), x[2])) \\\n .join(data.map(lambda x: ((x[0], x[1]), x[2]))) \\\n .values()\n #print \"RESULT2: %s\" % predictions1.take(11)\n return sqrt(predictionsAndRatings.map(lambda x: (x[0] - x[1]) ** 2).reduce(add) / float(n))", "def msll(Y_true, Y_pred, V_pred, Y_train):\n mt, st = Y_train.mean(), Y_train.std()\n ll = norm.logpdf(Y_true, loc=Y_pred, scale=np.sqrt(V_pred))\n rand_ll = norm.logpdf(Y_true, loc=mt, scale=st)\n msll = - (ll - rand_ll).mean()\n return msll", "def mle(data):\n\t\"\"\" return (tau, sigma ) \"\"\"\n\tcount_state_state,count_state_word,all_words = counts(data)\n\tsmooth_denom = len(all_words)\n\tsigma = get_sigma(count_state_state)\n\ttau = get_tau(count_state_word, smooth_denom)\n\treturn (tau,sigma)", "def _compute_rmse(self, data):\n actual = data.rating.values\n pred = 
self._predict_all(data)\n rmse = np.sqrt(np.sum((actual - pred) **2) /len(pred))\n return rmse", "def rmse(actual, predicted):\n rms = (actual-predicted)**2\n\n # Returning the sqaure root of the root mean square\n return float(np.sqrt(rms.mean()))", "def rmse(predicted, actual):\n #maybe make some assertions, assume have same length & in right order\n interm_total = 0\n for i in range(len(predicted)):\n interm_total += (predicted[i] - actual[i]) ** 2\n return sqrt(interm_total / len(predicted))", "def rmse_metric(actual, predicted):\r\n sum_error = 0.0\r\n for i in range(len(actual)):\r\n prediction_error = predicted[i] - actual[i]\r\n sum_error += (prediction_error ** 2)\r\n mean_error = sum_error / float(len(actual))\r\n return sqrt(mean_error)", "def rmse(true, predictions):\n true = np.array(true)\n predictions = np.array(predictions)\n return mean_squared_error(true, predictions) ** 0.5", "def evaluate(self, X_test, y_test):\n y_pred_train = self.pipeline.predict(self.X)\n mse_train = mean_squared_error(self.y, y_pred_train)\n rmse_train = np.sqrt(mse_train)\n \n self.mlflow_log_metric('rmse_train', rmse_train)\n \n y_pred_test = self.pipeline.predict(X_test)\n mse_test = mean_squared_error(y_test, y_pred_test)\n rmse_test = np.sqrt(mse_test)\n self.mlflow_log_metric('rmse_test', rmse_test)\n \n return (round(rmse_train, 3) ,round(rmse_test, 3))", "def evaluate(self, X_test, y_test):\n \n y_pred = self.pipeline.predict(X_test)\n test_rmse = compute_rmse(y_pred, y_test)\n print(\"test rmse:\", test_rmse)\n return test_rmse", "def mrr(ground_truth, prediction):\n rr = 0.\n for rank, item in enumerate(prediction):\n if item in ground_truth:\n rr = 1. / (rank + 1)\n break\n return rr", "def get_r2_score(ground_truth, predicted):\n residual = np.sum(np.square(np.subtract(ground_truth, predicted)))\n print(residual)\n total = np.sum(np.square(np.subtract(ground_truth, np.mean(ground_truth))))\n print(total)\n return np.subtract(1.0, np.divide(residual, (total + 0.00000000001)))", "def mse(result, expected):\n total_square_sum = 0\n for index1 in range(0, len(result)):\n total_square_sum += (result[index1] - expected[index1]) ** 2\n return total_square_sum / float(len(result))", "def mse_r2(true, predicted):\n # Reshaping set of images\n # n_imgs, nx, ny = true.shape\n # true = np.reshape(true, (n_imgs, nx*ny))\n # predicted = np.reshape(predicted, (n_imgs, nx*ny))\n nx = 33\n ny = 33\n\n # Compute MSE\n se = np.sum((true - predicted)**2, axis=1)\n mse = se*(nx*ny)**-1\n\n # Compute R squared\n mean = np.mean(true, axis=1)\n r2 = 1 - se*np.sum((true - np.expand_dims(mean, axis=1))**2, axis=1)**-1\n\n return mse, r2", "def rmse(actual: np.ndarray, predicted: np.ndarray):\n return np.sqrt(np.mean(np.square(_error(actual, predicted))))", "def rmse(labels, predictions):\n n = len(labels)\n differences = numpy.subtract(labels, predictions)\n return numpy.sqrt(1.0/n * (numpy.dot(differences, differences)))", "def R_squared(y_true, y_pred):\n SSE = K.sum(K.square(y_true - y_pred))\n TSS = K.sum(K.square(y_true - K.mean(y_true)))\n return 1-SSE/(TSS+K.epsilon())", "def computeRmse(model, data, n):\n predictions = model.predictAll(data.map(lambda x: (x[0], x[1])))\n predictionsAndRatings = predictions.map(lambda x: ((x[0], x[1]), x[2])) \\\n .join(data.map(lambda x: ((x[0], x[1]), x[2]))) \\\n .values()\n return sqrt(predictionsAndRatings.map(lambda x: (x[0] - x[1]) ** 2).reduce(add) / float(n))", "def get_rmse(self, y_true, y_pred):\r\n return np.sqrt(np.mean((np.array(y_true) - np.array(y_pred)) 
** 2))", "def evaluate(self, X_test, y_test):\n pipeline = run()\n y_pred = pipeline.predict(X_test)\n rmse = compute_rmse(y_pred, y_test)\n print(rmse)\n return rmse", "def rmsle(y_hat, y):\n\n log_diff = np.log(y_hat) - np.log(y)\n return np.sqrt(np.mean(log_diff**2))", "def mm_lrt_test(y, K):\n lm = LinearModel(y)\n lmm = LinearMixedModel(y)\n lmm.add_random_effect(K)\n lmm_res = lmm.get_ML()\n ll0 = lm.get_ll()\n ll1 = lmm_res['max_ll']\n D = 2 * (ll1 - ll0)\n pval = stats.chi2.sf(D, 1)\n return {'pval':pval, 'lrt_stat':D}", "def regression_evaluation(self, test_set, predicted_values):\r\n\r\n MAE = self.mean_absolute_error(test_set, predicted_values)\r\n MSE = self.mean_square_error(test_set, predicted_values)\r\n print(f\"Mean Percent Error:\\t{MAE:.2f}\")\r\n print(f\"Mean Square Error:\\t{MSE:.2f}\")", "def squaredError(label, prediction):\n return (label-prediction)*(label-prediction)", "def compute_r_squared(data, predictions):\n sst = ((data - np.mean(data)) ** 2).sum()\n ssreg = ((data - predictions) ** 2).sum()\n r_squared = 1 - ssreg / sst\n return r_squared", "def evaluate_rmse(y_true, y_pred):\n\n mse_eval = mean_squared_error(y_true, y_pred)\n\n rmse_eval = np.sqrt(mse_eval)\n\n return rmse_eval", "def computeRmse(model, data, n , sc):\n truth = data.map( lambda x: ((x[0], x[1]), x[2]) )\n truth.cache()\n ##print 'test zhou 0.....', truth.count() , '............', truth.take(10)\n\n predictions = model.predictAll(data.map(lambda x: (x[0], x[1])))\n predictions.cache()\n # here let's rescale predicted ratings to 0-10 scale\n maxPrediction = predictions.map(lambda x: x[2]).max()\n minPrediction = predictions.map(lambda x: x[2]).min()\n maxRate = RatingScale\n minRate = RatingScaleMin\n ##print 'test zhou 1......', predictions.count(), '............', predictions.take(10)\n\n #predictionsAndRatings = predictions.map(lambda x: ((x[0], x[1]), (x[2]-minPrediction)/(maxPrediction-minPrediction)*(maxRate-minRate)+minRate )).join(data.map(lambda x: ((x[0], x[1]), x[2]))).values()\n\n\n #predictedRating = predictions.map(lambda x: ((x[0], x[1]), (x[2]-minPrediction)/(maxPrediction-minPrediction)*(maxRate-minRate)+minRate ) )\n predictedRating = predictions.map(lambda x: ((x[0], x[1]), x[2] ) )\n predictedRating.cache()\n ##predictedRating.checkpoint()\n ##print 'test zhou 2.......', predictedRating.count(), '............', predictedRating.take(10)\n\n\n \n\n\n predictionsAndRatings = predictedRating.join(truth).values()\n #predictionsAndRatings = sc.union(predictedRating, truth)\n predictionsAndRatings.cache()\n #print 'test zhou 3........', predictionsAndRatings.count(), '............', predictionsAndRatings.take(10)\n #predictionsAndRatings = predictions.map(lambda x: ((x[0], x[1]), x[2])).join(data.map(lambda x: ((x[0], x[1]), x[2]))).values()\n \n return sqrt(predictionsAndRatings.map(lambda x: (x[0] - x[1]) ** 2).reduce(add) / float(n))\n #return 1.0", "def rmse(self, weights=None) -> float:\n return sqrt(np.average((self.true - self.predicted) ** 2, axis=0, weights=weights))", "def evaluate_regression(x, t, w, basis, degree):\n \t# TO DO:: Compute t_est and err \n #w_tranpose=w.T\n\n\n # My logic goes as follows:\n # Definition of test error is when you run the trained\n # model against a dataset that it hasn't been exposed to\n # this dataset is known as the testset \n\n # As such the basic algorithm goes as follows:\n # We do not need to recompute the weights but we need to recompute\n # phi for our test data\n\n # As such, we are interested in how well our trained weights\n 
# estimate against the test data so we matrix multiply our\n # weights against the phi from our test data\n # thus t_est = w_train.T*phi(x) since we want to know how well our\n # trained model estimates against the training data\n # but in implementation we do phi(x)*w_train\n # to match array dimensions \n\n\n #Compute design matrix from test data \n phi=design_matrix(x,basis,degree)\n phi_cross=np.linalg.pinv(phi)\n\n # Compute testing weights // just in case we require this variable\n #if(t is not None):\n #w_test=phi_cross.dot(t)\n #w_test=phi_cross.dot(t)\n\n # We want to be able to index into our target vector\n\n #t_est=phi.dot(w_test)\n #if (t is not None):\n # testing_estimate=phi.dot(w_test)\n #testing_estimate=phi.dot(w_test)\n\n # Estimate of our targets according to test data against learned \n # coefficients\n t_est=phi.dot(w)\n #print(\"t_est\",t_est)\n #t_est = None\n\n # We calculate the RMS error as follows\n # Take equation 3.12 of PRML and modify as follows\n # My logic:\n # The equation given in PRML gives the SSE (sum of squares error)\n # By definition the MSE (mean squared error) takes the SSE and divides \n # it by population size, we also preserve the 1/2 constant \n # throughout our calcuations \n # Afterwards we take our MSE and square root it.\n\n # Compute difference between target and estimate\n\n if(t is not None):\n \n diff=t-t_est\n # Square all observations\n diff_squared=np.power(diff,2)\n # Sum up all the observations in our vector\n sig_squared=diff_squared.sum()\n half_sig_squared=0.5*(sig_squared)\n # Calculate population size\n population_size=t.shape[0]\n rmse=np.sqrt(half_sig_squared/population_size)\n err=rmse\n else:\n err=None\n\n #diff=t-t_est\n\n\n # Square all observations \n #diff_squared=np.power(diff,2)\n\n # Sum up all the observations in our vector\n #sig_squared=diff_squared.sum()\n\n #half_sig_squared=0.5*(sig_squared)\n\n # Calculate population size\n #population_size=t.shape[0]\n\n #rmse=np.sqrt(half_sig_squared/population_size)\n #err = rmse\n #print(\"err inside function\",err)\n #err=rmse\n return (t_est, err)", "def test_model_outcome(predicted, actual, planned):\n if not isinstance(predicted, pd.DataFrame):\n predicted = pd.DataFrame(predicted, columns=[\"PREDICTED_TRIP_DURATION\"])\n if not isinstance(actual, pd.DataFrame):\n actual = pd.DataFrame(actual, columns=[\"ACTUAL_TRIP_DURATION\"])\n if not isinstance(planned, pd.DataFrame):\n planned = pd.DataFrame(planned, columns=[\"PLANNED_TRIP_DURATION\"])\n # Initialise the combined dataframe\n combined = pd.concat([predicted, actual, planned], axis=1)\n # Calculate the actual delay\n actual_delay = combined[\"PLANNED_TRIP_DURATION\"] - combined[\"ACTUAL_TRIP_DURATION\"]\n # Calculate the predicted delay\n predicted_delay = combined[\"PLANNED_TRIP_DURATION\"] - combined[\"PREDICTED_TRIP_DURATION\"]\n # Calculate the difference in delay\n delay_diff = actual_delay - predicted_delay\n # Combine the delays into a single dataframe\n combined_delay = pd.concat([pd.DataFrame(actual_delay, columns=['Actual_Delay']),\n pd.DataFrame(predicted_delay, columns=['Predicted_Delay']),\n pd.DataFrame(delay_diff, columns=['Difference_In_Delay'])], axis=1)\n # Obtain the index of the max and min values of the actual, predicted and difference delays\n actual_max_index = combined_delay[\"Actual_Delay\"].argmax()\n actual_min_index = combined_delay[\"Actual_Delay\"].argmin()\n predicted_max_index = combined_delay[\"Predicted_Delay\"].argmax()\n predicted_min_index = 
combined_delay[\"Predicted_Delay\"].argmin()\n delay_diff_max_index = combined_delay[\"Difference_In_Delay\"].argmax()\n delay_diff_min_index = combined_delay[\"Difference_In_Delay\"].argmin()\n # Get the Mean Absolute Error\n MAE = metrics.mean_absolute_error(combined[\"ACTUAL_TRIP_DURATION\"], combined[\"PREDICTED_TRIP_DURATION\"])\n # Get the R2 Score\n R2 = metrics.r2_score(combined[\"ACTUAL_TRIP_DURATION\"], combined[\"PREDICTED_TRIP_DURATION\"])\n # Get the Root Mean Squared Error\n RMSE = metrics.mean_squared_error(combined[\"ACTUAL_TRIP_DURATION\"], combined[\"PREDICTED_TRIP_DURATION\"],\n squared=False)\n # Get the Median Absolute Error\n MEDAE = metrics.median_absolute_error(combined[\"ACTUAL_TRIP_DURATION\"], combined[\"PREDICTED_TRIP_DURATION\"])\n # Get the Mean Squared Error Log Value\n MSLE = metrics.mean_squared_log_error(combined[\"ACTUAL_TRIP_DURATION\"], combined[\"PREDICTED_TRIP_DURATION\"])\n # Build Dictionary\n pass_val = {\"combined\": combined,\n \"combined_delay\": combined_delay,\n \"actual_max_index\": actual_max_index,\n \"actual_min_index\": actual_min_index,\n \"predicted_max_index\": predicted_max_index,\n \"predicted_min_index\": predicted_min_index,\n \"delay_diff_max_index\": delay_diff_max_index,\n \"delay_diff_min_index\": delay_diff_min_index,\n \"MAE\": MAE,\n \"R2\": R2,\n \"MEDAE\": MEDAE,\n \"RMSE\": RMSE,\n \"MSLE\": MSLE}\n # Return Dictionary\n return pass_val", "def rmse(rslt):\n # Antibugging\n assert (isinstance(rslt, dict))\n\n # Distribute information\n x_internal = rslt['AUX']['x_internal']\n start_internal = rslt['AUX']['init_values']\n\n # Calculate statistic\n rslt = ((x_internal - start_internal) ** 2).mean()\n\n # Antibugging\n assert (np.isfinite(rslt))\n assert (rslt > 0.0)\n\n # Finishing\n return rslt", "def evaluate_regression(x_test,t_test,basis,bias,w,degree=1,mu=None,s=1):\n \n phi = design_matrix(x_test,basis,degree,bias,mu,s)\n pred_test=phi@w\n # Measure root mean squared error on testing data.\n t_est = pred_test\n #print(\"deleteeeeeeeeeee\",t_est)\n #print(np.shape(t_est))\n err = np.sqrt((np.square(pred_test-t_test)).mean())\n \n \n\n return (t_est, err)", "def mse(real, predicted):\n # Calculate the mse\n N = len(real)\n mse = (1 / N) * np.sum((real - predicted) ** 2)\n return mse", "def predict(self) :\n y_pred = np.dot(self.W.T,self.X_test) + self.b \n if self.thr!=-1 :\n y_pred[y_pred <= self.thr] = -1\n y_pred[y_pred > self.thr] = 1\n y_pred = y_pred.astype(\"int\")\n corr = 0\n for i in range(y_pred.shape[1]) :\n if y_pred[:,i]==self.y_test[:,i] :\n corr += 1\n accu = (corr / y_pred.shape[1])*100\n print(\"ACCURACY : {}\".format(accu))\n else :\n rmse = np.sqrt(np.sum(np.square(self.y_test - y_pred)) / y_pred.shape[1])\n print(\"RMSE : {}\".format(rmse))", "def calc_rmse(self, data):\n res= data.score- data[['userid','itemid']].apply(lambda row:self.calc_score(row[0], row[1]),axis=1)\n res=[el**2 for el in np.array(res)]\n return np.sqrt(np.sum(res)/data.shape[0])", "def r_squared(measured, predicted):\n estimated_error = ((predicted - measured)**2).sum()\n mean_of_measured = measured.sum()/len(measured)\n variability = ((measured - mean_of_measured)**2).sum()\n return 1 - estimated_error/variability", "def netflix_rmse(answer, pred):\n ans = []\n pre = []\n if isinstance(answer, dict) and isinstance(pred, dict):\n for key1, value in pred.items():\n mov = answer[key1]\n for key2, val in value.items():\n pre.append(val)\n ans.append(mov[key2])\n zip_list = zip(ans, pre)\n sum_val = sum([(x - y) ** 2 for x, y in 
zip_list])\n return (sum_val / len(ans)) ** (0.5)\n else:\n zip_list = zip(answer, pred)\n sum_val = sum([(x - y) ** 2 for x, y in zip_list])\n return (sum_val / len(answer)) ** (0.5)", "def calcRMSE(labelsAndPreds):\n meanOfSqErrors = labelsAndPreds.map(lambda (x,y): squaredError(x,y)).mean()\n \n return math.sqrt(meanOfSqErrors)", "def _rmses(A, X, Y):\n return npext.rms(Y - np.dot(A, X), axis=0)", "def smpl_losses(self, pred_rotmat, pred_betas, gt_pose, gt_betas, has_smpl):\n conf = has_smpl.float()\n gt_rotmat = batch_rodrigues(gt_pose.view(-1, 3)).view(-1, 24, 3, 3)\n loss_regr_pose = self.criterion_regr(pred_rotmat, gt_rotmat)\n loss_regr_betas = self.criterion_regr(pred_betas, gt_betas)\n loss_regr_pose = (conf[:, None, None, None] * loss_regr_pose).mean()\n loss_regr_betas = (conf[:, None] * loss_regr_betas).mean()\n return loss_regr_pose, loss_regr_betas", "def MSEStep(X, y, W, b, learn_rate = 0.005):\n \n #compute predicted y \n y_pred = np.matmul(X, W) + b\n \n #compute the error the these predictions\n error = y - y_pred\n \n # compute steps\n W_new = W + learn_rate * np.matmul(error, X)\n b_new = b + learn_rate * error.sum()\n \n return W_new, b_new", "def bp_mll_loss(y_true: tf.Tensor, y_pred: tf.Tensor) -> tf.Tensor:\n\n # get true and false labels\n shape = tf.shape(y_true)\n y_i = tf.equal(y_true, tf.ones(shape))\n y_i_bar = tf.not_equal(y_true, tf.ones(shape))\n\n # get indices to check\n truth_matrix = tf.cast(pairwise_and(y_i, y_i_bar), dtype=tf.float32)\n\n # calculate all exp'd differences\n sub_matrix = pairwise_sub(y_pred, y_pred)\n exp_matrix = tf.exp(tf.negative(sub_matrix))\n\n # check which differences to consider and sum them\n sparse_matrix = tf.multiply(exp_matrix, truth_matrix)\n sums = tf.reduce_sum(sparse_matrix, axis=[1,2])\n\n # get normalizing terms and apply them\n y_i_sizes = tf.reduce_sum(tf.cast(y_i, dtype=tf.float32), axis=1)\n y_i_bar_sizes = tf.reduce_sum(tf.cast(y_i_bar, dtype=tf.float32), axis=1)\n normalizers = tf.multiply(y_i_sizes, y_i_bar_sizes)\n results = tf.divide(sums, normalizers)\n\n # average error\n return tf.reduce_mean(results)", "def calc_error(y_real, y_pred):\n if len(y_real) > 0:\n curr_err = rmse(y_pred, y_real)\n else:\n curr_err = np.nan\n return curr_err", "def rmse(self):\n y_pred, y_true = self._finalize_labels_and_prediction()\n\n return np.sqrt(F.mse_loss(y_pred, y_true).cpu().item())", "def rmse(self):\n y_pred, y_true = self._finalize_labels_and_prediction()\n\n return np.sqrt(F.mse_loss(y_pred, y_true).cpu().item())", "def computeErrorRate(test_sent, viterbi_tag_sequence):\n # initiate vars\n correct_predictions = 0\n total_predictions = 0\n correct_unknown_predictions = 0\n total_unknown_predictions = 0\n\n for j in range(len(test_sent)): # iterate tups in sent\n expectedTag = test_sent[j][1]\n actualTag = viterbi_tag_sequence[j]\n if actualTag == UNKNOWN_TAG:\n if expectedTag == UNKNOWN_TAG:\n correct_unknown_predictions += 1\n total_unknown_predictions += 1\n else:\n if actualTag == expectedTag:\n correct_predictions += 1\n total_predictions += 1\n\n err_rate_known = 1 - correct_predictions/total_predictions\n if total_unknown_predictions == 0:\n err_rate_unknown = 0\n else:\n err_rate_unknown = 1 - correct_unknown_predictions/total_unknown_predictions\n\n tot_pred = total_predictions + total_unknown_predictions\n corr_pred = correct_predictions + correct_unknown_predictions\n total_err = 1 - corr_pred/tot_pred\n\n return err_rate_known, err_rate_unknown, total_err", "def computeErrorRate(test_set, 
words_likely_tags):\n # initiate vars\n known_words = {} # those two dictionaries are optional, just for debuging\n unknown_words = {} # those two dictionaries are optional, just for debuging\n correct_predictions = 0\n total_predictions = 0\n correct_unknown_predictions = 0\n total_unknown_predictions = 0\n\n for i in range(len(test_set)): # iterate sentences\n test_sent = test_set[i]\n for j in range(len(test_sent)): # iterate words in sent\n w = test_sent[j][WORD]\n t = test_sent[j][TAG]\n\n # known words\n if w in words_likely_tags:\n if w in known_words:\n known_words[w][COUNTER_SHOWS] += 1\n if t == words_likely_tags[w]: # same tag\n known_words[w][COUNTER_EQUAL] += 1\n correct_predictions += 1\n else:\n if t == words_likely_tags[w]: # same tag\n known_words[w] = {COUNTER_SHOWS: 1, COUNTER_EQUAL: 1}\n correct_predictions += 1\n else:\n known_words[w] = {COUNTER_SHOWS: 1, COUNTER_EQUAL: 0}\n\n total_predictions += 1\n # unknown words\n else: # w not in words_likely_tags, treat w as unknown_word\n if w in unknown_words:\n unknown_words[w][COUNTER_SHOWS] += 1\n if t == UNKNOWN_TAG:\n # same tag as our model predicts for unknown words\n unknown_words[w][COUNTER_EQUAL] += 1\n correct_unknown_predictions += 1\n else:\n if t == UNKNOWN_TAG: # same tag\n unknown_words[w] = {COUNTER_SHOWS: 1, COUNTER_EQUAL: 1}\n correct_unknown_predictions += 1\n else:\n unknown_words[w] = {COUNTER_SHOWS: 1, COUNTER_EQUAL: 0}\n\n total_unknown_predictions += 1\n\n # print('correct_predictions......... = ', correct_predictions)\n # print('total_predictions........... = ', total_predictions)\n # print('correct_unknown_predictions. = ', correct_unknown_predictions)\n # print('total_unknown_predictions... = ', total_unknown_predictions)\n err_rate_known = 1 - correct_predictions/total_predictions\n err_rate_unknown = 1 - correct_unknown_predictions/total_unknown_predictions\n # total_err = err_rate_known + err_rate_unknown\n tot_pred = total_predictions + total_unknown_predictions\n corr_pred = correct_predictions + correct_unknown_predictions\n total_err = 1 - corr_pred/tot_pred\n\n return err_rate_known, err_rate_unknown, total_err", "def rsr(self) -> float:\n return float(self.rmse() / np.std(self.true))", "def rmse(X, Y):\n\n assert X.shape == Y.shape\n\n N = X.shape[0]\n\n if N < 9:\n print(\"Not enough points. {} datapoints given. 
At least 9 is required\".format(N))\n return\n\n diff = X - Y\n diff = diff**2\n rmse = np.sqrt(diff.mean())\n\n le = rmse * (1.0 - np.sqrt(1-1.96*np.sqrt(2.0)/np.sqrt(N-1)))\n ue = rmse * (np.sqrt(1 + 1.96*np.sqrt(2.0)/np.sqrt(N-1))-1)\n\n return rmse, le, ue", "def rmse(y_true, y_pred):\n\treturn backend.sqrt(backend.mean(backend.square(y_pred - y_true), axis=-1))", "def learnign_rate_examples():\n #######\n bad_larning_rate = 0.1\n not_bad_learning_rate = 1e-4\n good_learning_rate = 1e-3\n #######\n return bad_larning_rate, not_bad_learning_rate, good_learning_rate", "def mb_r(self) -> float:\n # Calculate metric\n n = self.predicted.size\n tot = 0.0\n for i in range(n):\n tot = tot + np.sum(np.abs(self.predicted - self.true[i]))\n mae_val = np.sum(np.abs(self.predicted - self.true)) / n\n mb = 1 - ((n ** 2) * mae_val / tot)\n\n return float(mb)", "def evaluation_error(y_real, y_pred, max_rating, min_rating):\n mae = mean_absolute_error(y_real, y_pred)\n nmae = normalized_mean_absolute_error(y_real, y_pred,\n max_rating, min_rating)\n rmse = root_mean_square_error(y_real, y_pred)\n\n return mae, nmae, rmse", "def rms_error(self, X, y) :\n ### ========== TODO : START ========== ###\n # part h: compute RMSE\n n, d = X.shape\n error = np.sqrt(self.cost(X,y)/n)\n ### ========== TODO : END ========== ###\n return error", "def test_particular_lemke_howson_with_lexicographic_ratio_test(self):\n\n A = np.array([[3, 3], [2, 5], [0, 6]])\n B = np.array([[3, 2], [2, 6], [3, 1]])\n for label, output in [\n (0, (np.array([1, 0, 0]), np.array([1, 0]))),\n (1, (np.array([0, 1 / 3, 2 / 3]), np.array([1 / 3, 2 / 3]))),\n (2, (np.array([1, 0, 0]), np.array([1, 0]))),\n (3, (np.array([1, 0, 0]), np.array([1, 0]))),\n (4, (np.array([0, 1 / 3, 2 / 3]), np.array([1 / 3, 2 / 3]))),\n ]:\n for eq, expected_eq in zip(lemke_howson_lex(A, B, label), output):\n self.assertTrue(all(np.isclose(eq, expected_eq)))\n\n A = np.array([[1, -1], [-1, 1]])\n B = -A\n for label in range(4):\n for eq in lemke_howson_lex(A, B, label):\n self.assertTrue(\n all(np.isclose(eq, np.array([1 / 2, 1 / 2]))), msg=str(eq)\n )", "def fit_rms(a,b):\n\tif len(a)!=len(b):\n\t\traise ValueError('Input vectors have unequal lengths.')\n\treturn rms(np.subtract(a,b))", "def relative_rmse(self) -> float:\n rrmse = self.rmse() / np.mean(self.true)\n return float(rrmse)", "def linear_regression(X, Y, Xs_test, Ys_test):\n\n X_n = (X - np.mean(X, axis = 0)) / np.std(X, axis = 0)\n XL = np.concatenate((X_n, np.ones((len(X),1))), axis = 1)\n w = np.linalg.solve(XL.T.dot(XL),XL.T.dot(Y))\n mses = []\n for i, X_test in enumerate(Xs_test):\n Y_test = Ys_test[i]\n XL_test = np.concatenate(((X_test - np.mean(X, axis = 0)) / np.std(X, axis = 0), \n np.ones((len(X_test),1))), axis = 1)\n Y_pred = XL_test.dot(w)\n mse = np.mean(np.sqrt(np.sum((Y_pred - Y_test) ** 2, axis = 1))) \n mses.append(mse) \n return mses", "def _validateRegression(self, trainingSet):\n \n sumErrors = [0] * len(trainingSet[0].label) \n\n sumTotal = 0\n \n for example in trainingSet:\n Y = self.test(example)\n \n errors = [(example.label[i] - Y[i])**2 for i in range(0,self.K)]\n \n for i in range(len(errors)):\n sumErrors[i] += errors[i]\n \n sumTotal += sum(errors) \n \n return 0.5 * sumTotal, errors", "def rmse(y_true, y_pred):\n return backend.sqrt(backend.mean(backend.square(y_pred - y_true), axis=-1))", "def msne(self):\n\n # linear programming for every support in the self\n for support1 in util.power_supports(self.s[0]):\n for support2 in util.power_supports(self.s[1]):\n # 
logging.info(f'MSNE calculation for supports: {support1}\\n{support2}')\n result2 = self._lp_msne(support1, support2, 1)\n result1 = self._lp_msne(support2, support1, 2)\n if result1.success and result2.success:\n return result1.x[1:], result2.x[1:]\n\n logging.warning('not able to find msne for the game')", "def exp_rmspe(y_true, y_pred):\n pct = tf.square((tf.exp(y_true) - tf.exp(y_pred)) / tf.exp(y_true))\n\n # compute mean excluding stores with zero denominator\n x = tf.reduce_sum(tf.where(y_true > 0.001, pct, tf.zeros_like(pct)))\n y = tf.reduce_sum(tf.where(y_true > 0.001, tf.ones_like(pct), tf.zeros_like(pct)))\n return tf.sqrt(x / y)", "def felm_rmse(y, X, weights, y_test, X_test):\n # Fit model and get predicted values of test data\n mod = sm.WLS(y, X, weights=weights).fit()\n pred = mod.predict(X_test)\n\n #Get residuals from test data\n res = (y_test[:] - pred.values)\n\n # Calculate ttest to check that residuals from test and train are independent\n t_stat, p_val = stats.ttest_ind(mod.resid, res, equal_var=False)\n\n # Return RMSE and t-stat from ttest\n return (np.sqrt(np.mean(res**2)), t_stat)", "def evaluate(self, X_test, y_test):\n y_pred = self.pipeline.predict(X_test)\n return compute_rmse(y_pred, y_test)", "def test_single_linear_regression_r_squared(single_linear_regression_model):\n # Train Data\n train_r_squared = single_linear_regression_model.calculate_r_squared(\n single_linear_regression_model.predictor_vars_train,\n single_linear_regression_model.response_var_train[:, 0],\n )\n\n test_r_squared = single_linear_regression_model.calculate_r_squared(\n single_linear_regression_model.predictor_vars_test,\n single_linear_regression_model.response_var_test[:, 0],\n )\n\n assert pytest.approx(train_r_squared, 0.001) == 1\n assert pytest.approx(test_r_squared, 0.001) == 1", "def test_qqlnu_sm(self):\n R = flavio.sm_prediction('R_13(pp->munu)', 1e3, 2e3)\n self.assertEqual(R, 1,msg=f'SM prediction for R ratio: {R} (has to be 1)')", "def rmse(y_true, y_pred): # -> Any:\n ...", "def PredictRunwayRMSE(DF, predictor, input_func, name=None):\r\n if name:\r\n print 'Predicted win for %s:' % name\r\n weighted1 = 0\r\n weighted2 = 0\r\n total_weight = 0\r\n for df in DF:\r\n series_in = input_func(df)\r\n assert all(len(s) == len(series_in[0]) for s in series_in)\r\n X, _ = Transform(series_in, [])\r\n df['prediction'] = predictor.predict(X)\r\n assert len(df.prediction) == len(series_in[0]), (len(df.prediction), len(series_in[0]))\r\n\r\n filter_runway = (df.actual_runway_arrival < df.actual_gate_arrival)\r\n golden_runway = df.actual_runway_arrival[filter_runway]\r\n r1 = util.RMSE(golden_runway, df.last_era_update[filter_runway])\r\n r2 = util.RMSE(golden_runway, (df.last_era_update + df.prediction)[filter_runway])\r\n w = len(df.last_era_update[filter_runway])\r\n weighted1 += r1 * w\r\n weighted2 += r2 * w\r\n total_weight += w\r\n #print 'Runway: %.2f' % (r1 - r2)\r\n\r\n weighted_score = ((weighted1 - weighted2) / total_weight)\r\n print 'Weighted: %.4f' % weighted_score\r\n return weighted_score", "def lr_insight_wr():\n steps = [('scaler', t.MyScaler(dont_scale='for_profit')),\n ('knn', t.KNNKeepDf())]\n pipe = Pipeline(steps)\n pipe.fit(X_raw)\n X = pipe.transform(X_raw)\n\n lr = LinearRegression()\n lr.fit(X, y)\n cv_results = cross_validate(lr, X, y,\n scoring=['r2', 'neg_mean_squared_error',\n 'neg_mean_absolute_error'],\n return_train_score=True)\n output = pd.DataFrame(\n {'train_r2': [cv_results['train_r2'].mean()],\n 'train_rmse': [np.mean(\n [np.sqrt(abs(i))\n 
for i in cv_results['train_neg_mean_squared_error']])],\n 'train_mae': [abs(cv_results['train_neg_mean_absolute_error'].mean())],\n 'test_r2': [cv_results['test_r2'].mean()],\n 'test_rmse': [np.mean(\n [np.sqrt(abs(i))\n for i in cv_results['test_neg_mean_squared_error']])],\n 'test_mae': [abs(cv_results['test_neg_mean_absolute_error'].mean())]\n },\n index=['LR']\n )\n return output", "def measure_rmse(self):\n try:\n assert self.residuals_forecast is not None\n except AssertionError:\n self._uvts_cls_logger.exception(\"AssertionError occurred, Cannot compute RMSE! Check your object mode\")\n\n self.rmse = np.sqrt(sum(np.square(self.residuals_forecast)) / len(self.residuals_forecast))\n \"\"\"\n if self._mode == 'test':\n self.rmse_test = self.rmse\n elif self._mode == 'test and validate':\n self.rmse_val = self.rmse - self.rmse_test\n elif self._mode == 'validate':\n self.rmse_val = self.rmse\n \"\"\"", "def evaluate_ave_squared_error(in_prediction_filename, in_answer_filename):\n with open(in_prediction_filename) as fin:\n predicted_tags = [l.strip() for l in fin.readlines() if len(l.strip()) != 0]\n\n with open(in_answer_filename) as fin:\n ground_truth_tags = [l.strip() for l in fin.readlines() if len(l.strip()) != 0]\n\n assert len(predicted_tags) == len(ground_truth_tags)\n error = 0.0\n for pred, truth in zip(predicted_tags, ground_truth_tags):\n error += (int(pred) - int(truth))**2\n return error/len(predicted_tags), error, len(predicted_tags)", "def eval_error_metric_skl(y_true: np.ndarray, y_score: np.ndarray) -> float:\n r = np.zeros(y_score.shape)\n gt = y_score > 0.5\n r[gt] = 1 - y_true[gt]\n le = y_score <= 0.5\n r[le] = y_true[le]\n return np.sum(r)", "def se(actual,expected):\n return np.power(np.subtract(actual,expected),2)", "def rmse(y_true: np.ndarray, y_pred: np.ndarray):\n return np.sqrt(np.mean(np.power(y_true - y_pred, 2)))", "def nmse(gt, pred):\n return np.linalg.norm(gt - pred) ** 2 / np.linalg.norm(gt) ** 2", "def nmse(gt, pred):\n return np.linalg.norm(gt - pred) ** 2 / np.linalg.norm(gt) ** 2", "def mse(response_vector, prediction_vector):\n return np.power(response_vector - prediction_vector, 2).mean()", "def nrmse(self) -> float:\n return float(self.rmse() / (np.max(self.true) - np.min(self.true)))", "def nmse(actual: np.ndarray, predicted: np.ndarray):\n return np.mean(np.square(actual - predicted)) / (np.mean(actual) * np.mean(predicted) + np.finfo(float).eps)", "def rmse(model, ratings):\n predictions = model.predict_all().clip(1, 5)\n predictions = predictions[ratings.nonzero()]\n true_values = ratings[ratings.nonzero()]\n \n return np.sqrt(np.mean((predictions - true_values)**2))", "def get_test_rmse(self, test):\n nnz_user, nnz_item = test.nonzero()\n nnz_test = list(zip(nnz_user, nnz_item))\n rmse = 0.0\n for u, i in nnz_test:\n user = self.user_matrix[u, :]\n item = self.item_matrix[:, i]\n pred = user @ item\n if pred > 5:\n pred = 5\n if pred < 1:\n pred = 1\n rmse += (self.data_matrix[u, i] - pred) ** 2\n rmse = np.sqrt(rmse / len(nnz_test))\n return rmse", "def lr_insight_er():\n steps = [('scaler', t.MyScaler(dont_scale='for_profit')),\n ('knn', t.KNNKeepDf())]\n pipe = Pipeline(steps)\n pipe.fit(X_raw_er)\n X = pipe.transform(X_raw_er)\n\n lr = LinearRegression()\n lr.fit(X, y_er)\n cv_results = cross_validate(lr, X, y_er,\n scoring=['r2', 'neg_mean_squared_error',\n 'neg_mean_absolute_error'],\n return_train_score=True)\n output = pd.DataFrame(\n {'train_r2': [cv_results['train_r2'].mean()],\n 'train_rmse': [np.mean(\n [np.sqrt(abs(i))\n for 
i in cv_results['train_neg_mean_squared_error']])],\n 'train_mae': [abs(cv_results['train_neg_mean_absolute_error'].mean())],\n 'test_r2': [cv_results['test_r2'].mean()],\n 'test_rmse': [np.mean(\n [np.sqrt(abs(i))\n for i in cv_results['test_neg_mean_squared_error']])],\n 'test_mae': [abs(cv_results['test_neg_mean_absolute_error'].mean())]\n },\n index=['LR']\n )\n return output", "def mbe(self) -> float:\n return float(np.mean(self._error(self.true, self.predicted)))", "def get_comparison_error(self, img1, img2, diffImg):\n\n output = subprocess.check_output(\n [\"compare\", \"-metric\", \"RMSE\", \"-alpha\", \"Off\", img1, img2, diffImg],\n stderr=subprocess.STDOUT,\n )\n rmse = float(output.split()[0])\n percent = float(output.split()[1][1:-1])\n return rmse, percent", "def get_rmsse(self, valid_preds) -> pd.Series:\n\n score = ((self.valid_series - valid_preds) ** 2).mean(axis = 1)\n rmsse = (score / self.scale).map(np.sqrt)\n\n return rmsse", "def test_using_predict(self):\n [X, labels, Y] = self.gen_data()\n # Call algorithm\n bias = multiLogReg(self.sds.from_numpy(\n X), self.sds.from_numpy(Y), verbose=False).compute()\n\n [m, y_pred, acc] = multiLogRegPredict(self.sds.from_numpy(\n X), self.sds.from_numpy(bias), self.sds.from_numpy(Y), verbose=False).compute()\n\n self.assertTrue(acc > 98)" ]
[ "0.7609951", "0.7355589", "0.7203462", "0.718389", "0.6820581", "0.64459103", "0.6406715", "0.6388028", "0.6317563", "0.6281459", "0.6191574", "0.60744154", "0.6051396", "0.6048326", "0.60155445", "0.59951115", "0.59895235", "0.5938883", "0.59192955", "0.5914614", "0.5897343", "0.58904225", "0.5843397", "0.5823863", "0.57472736", "0.5739611", "0.5714242", "0.57066935", "0.57009655", "0.5688419", "0.5683217", "0.568303", "0.56787366", "0.56783557", "0.5677663", "0.5671959", "0.5663178", "0.563159", "0.56295913", "0.56153923", "0.5601699", "0.56", "0.55979234", "0.55969167", "0.55867326", "0.5575986", "0.5573061", "0.55716753", "0.55656487", "0.5565141", "0.55477107", "0.55241966", "0.55143553", "0.5507724", "0.5507067", "0.54872406", "0.5476233", "0.5461353", "0.5461353", "0.5458187", "0.54506546", "0.54468536", "0.5436659", "0.5429054", "0.542607", "0.54185754", "0.5417798", "0.54060596", "0.5404523", "0.5395966", "0.5394548", "0.5392216", "0.5384595", "0.5366485", "0.5360449", "0.5360026", "0.5352473", "0.534784", "0.534191", "0.533871", "0.53289276", "0.53234655", "0.5316027", "0.53146476", "0.5305335", "0.5299562", "0.5293107", "0.5288641", "0.5278978", "0.5278978", "0.5276678", "0.5271771", "0.5271089", "0.52679425", "0.52650577", "0.5257201", "0.52563095", "0.5243584", "0.5237455", "0.5235081" ]
0.7670679
0
Return the path of the Bohrium systemwide configuration file
def config_file(self):
    return join_path(self.prefix.etc.bohrium, "config.ini")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getConfigPath():\n if sys.platform == 'linux':\n configpath = os.path.normpath(os.path.expanduser('~/.config/phobos'))\n elif sys.platform == 'darwin':\n configpath = os.path.normpath(os.path.expanduser('~/Library/Application Support/phobos'))\n elif sys.platform == 'win32':\n configpath = os.path.normpath(os.path.expanduser('~/AppData/Roaming/phobos'))\n else:\n configpath = 'ERROR: {0} not supported,'.format(sys.platform)\n return configpath", "def get_config_file_location():\n\n return './' + CONFIG_FILE_NAME", "def config_file_and_path():\n return str(rmfriend_dir() / 'config.cfg')", "def config_path(self):\n if os.path.exists(self._config_path):\n if pyhocon.ConfigFactory.parse_file(self._config_path):\n return os.path.realpath(self._config_path)\n # TODO if string is url/git repo, download file locally first\n return None", "def get_config_path():\n\n root = os.path.dirname(os.path.abspath(__file__))[:-5]\n config_path = os.path.join(root, 'config.ini')\n\n return config_path", "def _get_config_path():\n return os.path.join(os.path.expanduser('~'))", "def get_config_path() -> Path:\n config = os.getenv('TOM_CONFIG', '')\n return Path(config)", "def configPath(self):\n return os.path.dirname(__file__)", "def _app_config_file() -> str:\n if 'AISCALATOR_HOME' in os.environ:\n home = os.environ['AISCALATOR_HOME']\n file = os.path.join(home, \"config\", \"aiscalator.conf\")\n if os.path.exists(file):\n return file\n return os.path.join(os.path.expanduser(\"~\"), '.aiscalator',\n 'config', 'aiscalator.conf')", "def path_config(self):\n return HOMEASSISTANT_CONFIG.format(HASSIO_SHARE_INT)", "def system_conf_dir(self):\n return buildconfig.SPD_CONF_PATH", "def get_global_config_path():\n\n return \"/etc/dapsenv/dapsenv.conf\"", "def path(self):\n return os.path.join(self.config.get('path', os.getcwd()))", "def cfg_path(self):\n return self._cfg_path", "def config_path():\n dir_ = os.path.dirname(__file__)\n demo_dir = os.path.join(dir_, '../..')\n return os.path.join(demo_dir, 'mike_dev.ini')", "def config_file(self):\n return self[CONFIG_FILE_KEY]", "def get_config_file(self):\r\n return os.path.join(self.cloudletdir, \"applied_config\")", "def get_config_filepath():\n scs_installation_dirs = _path_utils.get_addon_installation_paths()\n\n # SEARCH FOR CONFIG...\n scs_config_file = ''\n for i, location in enumerate(scs_installation_dirs):\n test_path = os.path.join(location, 'config.txt')\n if os.path.isfile(test_path):\n scs_config_file = test_path\n break\n\n # IF NO CONFIG FILE, CREATE ONE...\n if scs_config_file == '':\n lprint(\"S Creating new 'config.txt' file:\\n\\t %r\", (os.path.join(scs_installation_dirs[0], 'config.txt'),))\n scs_config_file = new_config_file(os.path.join(scs_installation_dirs[0], 'config.txt'))\n\n # print('SCS Blender Tools Config File:\\n \"%s\"\\n' % os.path.join(scs_installation_dirs[0], 'config.txt'))\n return scs_config_file", "def config_file_address() -> str:\n\n config_files = json_files_from_folder(\"config\")\n config_file = choose_config(config_files) # Choice a config file if there is more then 1 in config folder\n return config_file", "def _get_config_filepath(self):\n\t\tif self.configfilepath is None:\n\t\t\treturn os.path.join(self.workdir, \"config.txt\")\n\t\telse:\n\t\t\treturn self.configfilepath", "def getConfigPath():\n\n global args, ConfigPathDefault\n\n if args.config_location:\n return args.config_location;\n return ConfigPathDefault;", "def get_user_config_path():\n\n return 
\"{}/.dapsenv/dapsenv.conf\".format(expanduser(\"~\"))", "def get_config_path(config):\n section = config.sections()[0]\n return Path(config.get(section, \"path\")).expanduser().absolute()", "def determine_config() -> str:\n if os.environ.get(PortholeConfig.CONFIG_ENV_NAME) is not None:\n return os.environ.get(PortholeConfig.CONFIG_ENV_NAME)\n if os.path.isfile(PortholeConfig.DEFAULT_CONFIG_FILE):\n return PortholeConfig.DEFAULT_CONFIG_FILE\n for file_path in PortholeConfig.OTHER_ALLOWED_CONFIG_PATHS:\n if os.path.isfile(file_path):\n return file_path\n raise FileNotFoundError(\n \"Porthole is unable to locate a useable config file. \"\n \"Try setting the PORTHOLE_CONFIG environment variable, \"\n \"or creating a porthole.ini file in your main project directory.\"\n )", "def get_default_config_path():\n if os.name == 'posix':\n config_path = os.path.join(os.path.expanduser(\"~\"), '.fpdb')\n elif os.name == 'nt':\n config_path = os.path.join(os.environ[\"APPDATA\"], 'fpdb')\n else: config_path = False\n return config_path", "def path_config_docker(self):\n return HOMEASSISTANT_CONFIG.format(HASSIO_SHARE_EXT)", "def default_config_file(self):\n return DEFAULT_CONFIG_FILEPATH", "def _get_config_fname():\n directory = _get_vispy_app_dir()\n if directory is None:\n return None\n fname = op.join(directory, 'vispy.json')\n if os.environ.get('_VISPY_CONFIG_TESTING', None) is not None:\n fname = op.join(_TempDir(), 'vispy.json')\n return fname", "def get_cfg_path(filename):\n return os.path.join(get_cfg_dir(), filename)", "def get_instance_config_path():\n return join(settings.PROJECT_DIR, \"conf\", \"eoxserver.conf\")", "def get_config_dir() -> str:\n # Get the system app configuration standard location\n if 'APPDATA' in os.environ:\n return os.environ['APPDATA']\n elif 'XDG_CONFIG_HOME' in os.environ:\n return os.environ['XDG_CONFIG_HOME']\n else:\n return os.path.join(os.environ['HOME'], '.config')", "def binpath(self):\n return self._query_config()['binpath']", "def get_configuration_file():\n path = os.path.abspath(os.curdir)\n while path != os.sep:\n config_path = os.path.join(path, CONFIG_FILE_NAME)\n if os.path.exists(config_path):\n return config_path\n path = os.path.dirname(path)\n return None", "def getBlenderConfigPath(blenderversion):\n if sys.platform == 'linux':\n scriptspath = os.path.normpath(\n os.path.expanduser('~/.config/blender/{0}/config'.format(blenderversion))\n )\n elif sys.platform == 'darwin':\n scriptspath = os.path.normpath(\n os.path.expanduser(\n '~/Library/Application Support/Blender/{0}/config'.format(blenderversion)\n )\n )\n elif sys.platform == 'win32':\n scriptspath = os.path.normpath(\n os.path.expanduser(\n '~/AppData/Roaming/Blender Foundation/Blender/{0}/config'.format(blenderversion)\n )\n )\n else:\n scriptspath = 'ERROR: {0} not supported,'.format(sys.platform)\n return scriptspath", "def mip_config_path():\n return \"tests/fixtures/global_config.yaml\"", "def program_config(self, program):\n config = self.get_toml(program)\n if config:\n return os.path.expanduser(config)\n return os.path.join(self.bin_root(), \"bin\", \"{}{}\".format(\n program, self.exe_suffix()))", "def get_user_config_dir(options):\n return '/root/.spinnaker'", "def _get_deployment_config_file():\n config_path = cfg.CONF.find_file(\n cfg.CONF.paste_deploy['api_paste_config'])\n if config_path is None:\n return None\n\n return os.path.abspath(config_path)", "def find_conf():\n path = os.path.abspath(os.path.expanduser(os.getcwd()))\n while path not in ('', '/'):\n conf_path = 
os.path.join(path, 'dataplicity.conf')\n if os.path.exists(conf_path):\n return conf_path\n path = os.path.dirname(path)\n return None", "def _github_config(self, config_file_name):\n home = os.path.abspath(os.environ.get('HOME', ''))\n config_file_path = os.path.join(home, config_file_name)\n return config_file_path", "def app_config_home(self) -> str:\n if self.app_config_has(\"app_config_home_directory\"):\n return self.app_config()[\"app_config_home_directory\"]\n return os.path.join(os.path.expanduser(\"~\"), '.aiscalator')", "def configDir():\n return os.path.join(os.environ['HARNESSEDJOBSDIR'], 'config', getSiteName())", "def getConfigFile(self):\n if not self.__args.configfile:\n msg = \"not set configfile\"\n self.__logger.error(msg)\n return \"\"\n cf = os.getcwd() + os.sep + self.__args.configfile\n if not os.path.exists(self.__args.configfile):\n msg = \"file \" + cf + \" not exist!\"\n self.__logger.error(msg)\n return \"\"\n return cf", "def get_config_file():\n return deployr_config_repository.get_deployr_config_file()", "def get_user_config_filename(appname='notify'):\n import platform\n system = platform.system()\n if system == 'Windows':\n rootname = os.path.join(os.environ['APPDATA'], appname)\n filename = appname + \".cfg\"\n prefix = ''\n elif system == 'Linux':\n XDG_CONFIG_HOME = os.environ.get('XDG_CONFIG_HOME', None)\n rootname = XDG_CONFIG_HOME or os.path.join('~', '.config')\n rootname = os.path.expanduser(rootname)\n # check if XDG_CONFIG_HOME exists\n if not os.path.exists(rootname) and XDG_CONFIG_HOME is None:\n # XDG_CONFIG_HOME is not used\n rootname = os.path.expanduser('~')\n filename = appname + \".cfg\"\n prefix = '.'\n else:\n rootname = os.path.join(rootname, appname)\n filename = appname + \".cfg\"\n prefix = ''\n elif system == 'Darwin':\n rootname = os.path.expanduser('~')\n filename = appname + \".cfg\"\n prefix = '.'\n else:\n # Unknown\n rootname = os.path.expanduser('~')\n filename = appname + \".cfg\"\n prefix = ''\n return os.path.join(rootname, prefix + filename)", "def default_configfile():\n dirname=None\n if os.getenv(\"HOME\"):\n dirname=os.getenv(\"HOME\")\n elif os.getenv(\"USERPROFILE\"):\n dirname=os.getenv(\"USERPROFILE\")\n\n else:\n raise FattyException(\"No HOME or USERPROFILE variable set, unable to determine default config file\")\n\n return os.path.join(dirname,\".fattybugs\")", "def get_base_config(eva_installation_dir: Path) -> Path:\n # if eva package is installed into environment\n if importlib_resources.is_resource(\"eva\", EVA_CONFIG_FILE):\n with importlib_resources.path(\"eva\", EVA_CONFIG_FILE) as yml_path:\n return yml_path\n else:\n # For local dev environments without package installed\n return eva_installation_dir / EVA_CONFIG_FILE", "def get_config(self):\n root_folder = os.path.dirname(os.path.dirname(__file__)).replace('\\\\', '/')\n root_folder = root_folder.replace('/core', '/config')\n # print root_folder, '<----------------------------------------'\n proj_config = os.path.join(root_folder, self.project.lower()).replace('\\\\', '/')\n # print proj_config, '============================================='\n if not os.path.isfile(proj_config):\n proj_config = os.path.join(root_folder, 'default').replace('\\\\', '/')\n # print proj_config, '<========================================'\n return proj_config", "def get_production_config_file_path(path: pathlib.Path) -> pathlib.Path:\n return get_production_config_dir_path(path) / \"config.py\"", "def __setup_config_file_abspath():\n if \"APPDATA\" in os.environ:\n 
basedir = os.environ[\"APPDATA\"]\n elif \"HOME\" in os.environ:\n basedir = os.environ[\"HOME\"]\n else:\n raise AssertionError(\"APPDATA or HOME env vars must be defined \"\n \"to store config file\")\n abs_dir_path = os.path.join(\n basedir, TestManager.APPDATA_SUBDIRECTORY_NAME)\n os.makedirs(abs_dir_path, exist_ok=True, mode=0o660)\n return os.path.join(abs_dir_path, ConfigManager.CONFIG_FILE_NAME)", "def get_http_config_file_path(node_uuid):\n return os.path.join(get_http_boot_dir(), node_uuid, 'config')", "def _get_config_filename():\n return 'pylidc.conf' if sys.platform.startswith('win') else '.pylidcrc'", "def conf_dir(self):\r\n return self._conf_dir", "def platform_config_dir():\n if POSIX: # nocover\n dpath_ = os.environ.get('XDG_CONFIG_HOME', '~/.config')\n elif DARWIN: # nocover\n dpath_ = '~/Library/Application Support'\n elif WIN32: # nocover\n dpath_ = os.environ.get('APPDATA', '~/AppData/Roaming')\n else: # nocover\n raise NotImplementedError('Unknown Platform %r' % (sys.platform,))\n dpath = normpath(expanduser(dpath_))\n return dpath", "def _find_config_root(self) -> str:\n location = [\"apache2.conf\", \"httpd.conf\", \"conf/httpd.conf\"]\n for name in location:\n if os.path.isfile(os.path.join(self.root, name)):\n return os.path.join(self.root, name)\n raise errors.NoInstallationError(\"Could not find configuration root\")", "def config_abex_path(experiment_name: str) -> Path: # pragma: no cover\n return experiment_dir(experiment_name) / ABEX_CONFIG", "def get_github_config_path(self, config_file_name):\n home = os.path.abspath(os.environ.get('HOME', ''))\n config_file_path = os.path.join(home, config_file_name)\n return config_file_path", "def config_directory(self):\n\n return self.get_raw(\"config_directory\")", "def full_path(self, config_path=CONFIG_PATH):\n return os.path.join(config_path, self.filename)", "def find_config():\n print(\"in find_config()\")\n print(os.getcwd())\n print(os.listdir(os.getcwd()))\n print(os.path.expanduser(\"~/.pylcmodel\"))\n if os.path.isfile(os.path.join(os.getcwd(), \".pylcmodel\")):\n return os.path.join(os.getcwd(), \".pylcmodel\")\n elif os.path.isfile(os.path.expanduser(\"~/.pylcmodel\")):\n return os.path.expanduser(\"~/.pylcmodel\")\n else:\n raise FileNotFoundError(\"No .pylcmodel config file found.\")", "def _get_dev_conf_dir(self):\r\n is_ok, file_dir = (\r\n GlobalModule.EM_CONFIG.read_sys_common_conf(\r\n \"Cgwsh_device_dir_path\"))\r\n if not is_ok:\r\n raise IOError(\"Failed to get Config : Cgwsh_device_dir_path\")\r\n return file_dir", "def config_dir(self) -> Path:\n return self._config_dir", "def _get_egg_path(self):\n try:\n _dist = get_distribution('janitoo_nut')\n return _dist.__file__\n except AttributeError:\n return 'src-nut/config'", "def _cfg_path(argv):\n cfg_path = argv[1] if len(argv) > 1 else None\n _is_file = os.path.isfile\n if not cfg_path or not _is_file(cfg_path):\n if cfg_path:\n _info(\"no config at {}, trying the default location\".format(\n cfg_path))\n cfg_path = _DEFAULT_PATH\n if not _is_file(cfg_path):\n _info(\"no config at {}, exiting\".format(cfg_path))\n return None\n return cfg_path", "def get_config_path(self):\n\t\treturn call_sdk_function('PrlFoundVmInfo_GetConfigPath', self.handle)", "def _get_rcfile(self, fname):\n rcfile = fname or os.environ.get('OPENERPRCSERVERS') or os.path.join(get_home_dir(), '.openerprc.servers')\n if not os.path.exists(rcfile):\n log = logging.getLogger('common.options')\n additional_info = \"\"\n log.warn('Config file %s does not exist !%s'% (rcfile, 
additional_info ))\n return os.path.abspath(rcfile)", "def platform_config_filename(region, account_prefix, prod):\n return 'infra/platform-config/%s/%s/%s.json' % (\n account_prefix, \"prod\" if prod else \"dev\", region\n )", "def get_config_file_name(self):\n argv = sys.argv\n config_type = \"dev\" # default configuration type\n if None != argv and len(argv) > 1 :\n config_type = argv[1]\n config_file = config_type + \".cfg\"\n logger.info(\"get_config_file_name() return : \" + config_file)\n return config_file", "def _build_config_file_path(cls, filename):\n if os.path.exists(filename):\n return filename\n res = os.path.join(os.path.dirname(__file__), '..', 'config', filename)\n if not os.path.exists(res):\n raise ValueError(\"requested config file %s does not exist!\" % filename)\n return res", "def cfgPath( *args ):\n return '/'.join( [str( k ) for k in args] )", "def get_path(self, key):\n value = self.getn(key)\n if value is None:\n logger.warning(\"Specified config '%s' is None or not exist\" % key)\n return None\n if not isinstance(value, str):\n msg = \"Specified config '%s' is non-string: %s\" % (key, value)\n logger.error(msg)\n raise ValueError(msg)\n #\n path = os.path.expanduser(value)\n if not os.path.isabs(path):\n # Got relative path, try to convert to the absolute path\n if hasattr(self, \"userconfig\"):\n # User configuration loaded\n path = os.path.join(os.path.dirname(self.userconfig), path)\n else:\n logger.warning(\"Cannot convert to absolute path: %s\" % path)\n return os.path.normpath(path)", "def get_kube_config_file_path(self):\n return self._kube_config", "def get_xshear_config_dir(run):\n d=get_run_dir(run)\n return os.path.join(d, 'config')", "def get_config_file_path(filename):\n # Use __file__ to derive a path relative to this module's location which points to the tests data directory.\n relative_path = os.path.join(\n os.path.dirname(os.path.realpath(__file__)), \"..\", \"config_files\"\n )\n return os.path.join(os.path.abspath(relative_path), filename)", "def find_config_file(self):\n filename = self.values.get('config_file', Default('noy.json'))\n\n ignore_missing = False\n if isinstance(filename, Default):\n filename = filename.val\n ignore_missing = True\n\n filename = os.path.abspath(filename)\n if os.path.exists(filename):\n return filename\n elif not ignore_missing:\n raise MissingConfigFile(\"Config file doesn't exist at {}\".format(filename))", "def AptGetPathToConfig(vm):\n del vm\n return '/etc/mysql/mysql.conf.d/mysqld.cnf'", "def test_find_config_cur_dir(self, in_tmp_path):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\"image: bosybux\\n\")\n\n path, rel, _ = scuba.config.find_config()\n assert_paths_equal(path, in_tmp_path)\n assert_paths_equal(rel, \"\")", "def getDBPath():\n return os.path.join(CONFIG_DIR, CONFIG_DICT['common']['local_db'])", "def get_conf_output (self):\n return self.distribution.get_conf_filename(self.install_lib)", "def get_config(_config_file):\n ''' script absolute location '''\n abs_path = os.path.dirname(inspect.getfile(inspect.currentframe()))\n\n if _config_file[0] not in ('/', '~'):\n if os.path.isfile(os.path.join(abs_path, _config_file)):\n config_path = os.path.join(abs_path, _config_file)\n else:\n raise IOError('Failed to find config file')\n else:\n if os.path.isfile(_config_file):\n config_path = _config_file\n else:\n raise IOError('Failed to find config file')\n\n with open(config_path) as cjson:\n config_data = json.load(cjson)\n # config must not be empty:\n if len(config_data) > 0:\n return 
config_data\n else:\n raise Exception('Failed to load config file')", "def get_config_contents() -> str:\n config_file = os.environ.get(\"PYP_CONFIG_PATH\")\n if config_file is None:\n return \"\"\n try:\n with open(config_file, \"r\") as f:\n return f.read()\n except FileNotFoundError as e:\n raise PypError(f\"Config file not found at PYP_CONFIG_PATH={config_file}\") from e", "def getConfigFileName(self):\n return self._configFileName", "def get_production_config_dir_path(path: pathlib.Path) -> pathlib.Path:\n return path / \"shot-builder\"", "def bin_path(self) -> Path:\n return self._root_path / \"stefan-on-software-api-client\" / \"bin\"", "def confDir(self):\r\n return self._confDir", "def get_config_file_for_auto_config(self) -> Optional[Text]:\n return self.config_file", "def get_default_config_file() -> Path:\n return get_path_to_pyflow() / \"pyflow\" / \"conf\" / CONFIG_FILE", "def _findconfigfile():\n\n # A ordered list of possible config files\n configfiles = [\"~/.githubhooksrc\",\n \"/etc/githubhooks\"]\n\n for configfile in configfiles:\n if os.path.isfile(os.path.expanduser(configfile)):\n return os.path.expanduser(configfile)\n\n # No valid config file found\n print \"ERROR: No valid config file found in any of the following locations:\"\n for configfile in configfiles:\n print \" - %s\" % configfile\n sys.exit(1)", "def filename(resname):\n # check the HOME for personal config file\n prv_filename = os.path.join(os.getenv(\"HOME\"), \".aphla\", resname)\n if os.path.exists(prv_filename):\n return prv_filename\n else:\n # use the config within distribution\n return resource_filename(__name__, resname)", "def _config_path(res, ctx):\n\n if _has_error_code(res):\n return print_errors(res, ctx)\n\n return res['path']", "def systemdir():\n if platform == 'windows':\n return os.path.join(os.environ['ProgramFiles'], 'automaton')\n else:\n return \"/etc/automaton/\"", "def get_ocio_path():\n bl_path = os.getcwd()\n version = f'{bpy.app.version[0]}' + '.' + f'{bpy.app.version[1]}'\n cs_folder = os.path.join(bl_path, version, 'datafiles', 'colormanagement')\n\n return os.path.join(cs_folder, 'config.ocio')", "def get_pecan_config():\n filename = api_config.__file__.replace('.pyc', '.py')\n return filename", "def get_default_config_filename():\n if 'PYWREN_CONFIG_FILE' in os.environ:\n config_filename = os.environ['PYWREN_CONFIG_FILE']\n # FIXME log this\n\n elif os.path.exists(\".pywren_config\"):\n config_filename = os.path.abspath('.pywren_config')\n\n else:\n config_filename = get_default_home_filename()\n\n return config_filename", "def get_data_path(name):\n js = open('config.json').read()\n data = json.loads(js)\n return os.path.expanduser(data[name]['data_path'])", "def sirsam_bs_conf(sirsam_bootstrap):\n return os.path.join(sirsam_bootstrap, 'bootstrapping.yaml')", "def GetPath () :\n return sys.hal_log_values [\"__log_path\"]", "def _find_config_file(self) -> str or None:\n import os\n\n for path in self.paths:\n path = os.path.expanduser(path)\n for extension in self.file_extensions:\n for file_name in self.file_names:\n file_path = os.path.join(path, \"{}.{}\".format(file_name, extension))\n if os.path.isfile(file_path):\n return file_path\n\n return None", "def filename(self):\n return f'{self._peer.interface}.conf'", "def YumGetPathToConfig(vm):\n raise NotImplementedError" ]
[ "0.7627292", "0.7395096", "0.73803973", "0.72431105", "0.71532726", "0.7132535", "0.71078205", "0.7089271", "0.70785385", "0.70739216", "0.7015473", "0.7002854", "0.69692624", "0.6962831", "0.6919271", "0.6863968", "0.68277985", "0.6817259", "0.6779527", "0.675732", "0.67500997", "0.67495424", "0.67368305", "0.6680615", "0.667474", "0.66669726", "0.6665688", "0.66622084", "0.66566414", "0.66180927", "0.65912443", "0.6573435", "0.65507424", "0.6546892", "0.6537619", "0.6536339", "0.6534651", "0.65131944", "0.6506588", "0.64996", "0.6499", "0.6486252", "0.648536", "0.64809066", "0.64768916", "0.64628357", "0.6457683", "0.64340544", "0.64206535", "0.6410845", "0.63826203", "0.63643295", "0.6329323", "0.6312299", "0.6306684", "0.6301833", "0.629976", "0.62963265", "0.6292997", "0.629116", "0.6284619", "0.6264891", "0.6255323", "0.6241926", "0.6215619", "0.620502", "0.62019163", "0.61992776", "0.61973447", "0.6191916", "0.61888963", "0.6186517", "0.61845046", "0.61708003", "0.61647415", "0.61630917", "0.61602426", "0.61477387", "0.61427885", "0.6120701", "0.61151284", "0.61109406", "0.61043304", "0.6097693", "0.60860485", "0.6081319", "0.60683817", "0.60548824", "0.6052036", "0.60179824", "0.60112435", "0.6006727", "0.59996843", "0.5998365", "0.5990393", "0.5986546", "0.59805465", "0.59722507", "0.5971246", "0.59647" ]
0.8231032
0
Create a new model from raw data, like word frequencies, Brown clusters and word vectors.
def init_model(lang, output_dir, freqs_loc=None, clusters_loc=None, vectors_loc=None, prune_vectors=-1):
    if freqs_loc is not None and not freqs_loc.exists():
        prints(freqs_loc, title=Messages.M037, exits=1)
    clusters_loc = ensure_path(clusters_loc)
    vectors_loc = ensure_path(vectors_loc)
    probs, oov_prob = read_freqs(freqs_loc) if freqs_loc is not None else ({}, -20)
    vectors_data, vector_keys = read_vectors(vectors_loc) if vectors_loc else (None, None)
    clusters = read_clusters(clusters_loc) if clusters_loc else {}
    nlp = create_model(lang, probs, oov_prob, clusters, vectors_data, vector_keys, prune_vectors)
    if not output_dir.exists():
        output_dir.mkdir()
    nlp.to_disk(output_dir)
    return nlp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_model(self, documents):\n self.vectorizer = TfidfVectorizer(\n stop_words='english', lowercase=True).fit(documents)\n self.vectors = self.vectorizer.transform(documents)", "def build_model_from_inputs(self):\n if self.term_list is None:\n # no supplied token list -- use vocabulary of the training dataset\n # self.term_list = self.vocabulary\n # info(\"Setting bag dimension to {} from input vocabulary.\".format(len(self.term_list)))\n # will generate the vocabulary from the input\n pass\n info(f\"Building {self.name} model\")\n bagger = None\n if self.config.max_terms is not None:\n bagger = Bag(vocabulary=self.term_list, weighting=self.base_name, ngram_range=self.ngram_range, max_terms=self.config.max_terms)\n else:\n bagger = Bag(vocabulary=self.term_list, weighting=self.base_name, ngram_range=self.ngram_range)\n\n train_idx = self.indices.get_train_instances()\n texts = Text.get_strings(self.text.data.get_slice(train_idx))\n bagger.map_collection(texts, fit=True, transform=False)\n self.term_list = bagger.get_vocabulary()\n\n self.dimension = len(self.term_list)\n self.config.dimension = self.dimension", "def create_train_model(self):\n st = LancasterStemmer()\n with open(self.data_path, encoding='utf8') as f_name:\n sentences = [[st.stem(w) for w, t in pos_tag(line.lower().split()) if 'N' in t] for line in f_name]\n sentences = [filter(lambda x: len(x) > 2, (word.strip(punctuation) for word in sentences)) for sent in sentences]\n model = Word2Vec(sentences,\n min_count=self.min_count,\n size=self.size,\n window=self.window,\n workers=4)\n model.save(self.model_path)", "def read_model(self):\n \n # words dictionary\n f = open(self.name + \"_words\", 'r') \n d_str = f.read()\n f.close()\n \n d = dict(eval(d_str))\n self.words = d\n\n # word_lengths dictionary\n f = open(self.name + \"_word_lengths\", 'r') \n d_str = f.read()\n f.close()\n \n d = dict(eval(d_str))\n self.word_lengths = d\n\n # stems dictionary\n f = open(self.name + \"_stems\", 'r') \n d_str = f.read()\n f.close()\n \n d = dict(eval(d_str))\n self.stems = d\n\n # sentence_lengths dictionary\n f = open(self.name + \"_sentence_lengths\", 'r') \n d_str = f.read()\n f.close()\n \n d = dict(eval(d_str))\n self.sentence_lengths = d\n\n # ten most common words\n f = open(self.name + \"_common_word\", 'r') \n d_str = f.read()\n f.close()\n \n d = list(eval(d_str))\n self.common_word = d", "def read_model(self):\n f = open(self.name + '_' + 'words', 'r')\n self.words = f.read()\n f.close()\n elf.words = dict(eval(self.words))\n \n f = open(self.name + '_' + 'word_lengths', 'r')\n self.word_lengths = f.read()\n f.close()\n self.word_lengths = dict(eval(self.word_lengths))\n\n f = open(self.name + '_' + 'sentence_lengths', 'r')\n self.sentence_lengths = f.read()\n f.close()\n self.sentence_lengths = dict(eval(self.sentence_lengths))\n\n f = open(self.name + '_' + 'stems', 'r')\n self.stems = f.read()\n f.close()\n self.stems = dict(eval(self.stems))\n\n f = open(self.name + '_' + 'commas_per_sentence', 'r')\n self.commas_per_sentence = f.read()\n f.close()\n self.commas_per_sentence = dict(eval(self.commas_per_sentence))", "def read_model(self):\n filename = self.name + '_words'\n f = open(filename, 'r') \n d_str = f.read() \n f.close()\n d = dict(eval(d_str))\n self.words = d\n \n filename2 = self.name + '_word_lengths'\n f = open(filename2, 'r') \n d2_str = f.read() \n f.close()\n d2 = dict(eval(d2_str))\n self.word_lengths = d2\n \n filename3 = self.name + '_stems'\n f = open(filename3, 'r') \n d3_str = f.read() \n 
f.close()\n d3 = dict(eval(d3_str))\n self.stems = d3\n \n filename4 = self.name + '_sentence_lengths'\n f = open(filename4, 'r') \n d4_str = f.read() \n f.close()\n d4 = dict(eval(d4_str))\n self.sentence_lengths = d4\n \n filename5 = self.name + '_punctuation'\n f = open(filename5, 'r') \n d5_str = f.read() \n f.close()\n d5 = dict(eval(d5_str))\n self.punctuation = d5", "def __init__(self, model_name):\n self.name = str(model_name)\n self.numwords = 0\n self.words = {} #how many types of words\n self.word_lengths = {} #how many word lengths\n self.stems = {} #how many stems\n self.sentence_lengths = {} #how many sentence lengths\n self.common_word = [] #top ten most common words", "def __init__(self, model_name):\n self.name = model_name\n self.words = {}\n self.word_lengths = {}\n self.sentence_lengths = {}\n self.stems = {}\n self.commas_per_sentence = {}", "def __init__(self, filename, modelname, dim_model=100):\n self.dim = dim_model\n model = word2vec.Word2Vec.load(modelname) # modelname = '../Utils/word2vec_model_allskills'\n f = open(filename,\"rb\") # filename = \"../Data/all_top_skills_final_fre.txt\"\n key_list = pickle.load(f)\n f.close()\n self.data = pd.DataFrame(columns=[np.zeros([self.dim])]) ## how to determine the dimension here\n j = 0\n self.skill = []\n for i in key_list.keys():\n try: \n self.data.loc[j] = model[i]\n j+=1\n self.skill.append(i)\n except:\n j = j\n self.cluster = []", "def prepareData(self):\n\t\tprint ('')\n\t\tfrom keras.preprocessing.sequence import pad_sequences\n\t\tfrom sklearn.model_selection import train_test_split\n\t\tfrom keras.utils import to_categorical\n\t\timport numpy as np\n\n\t\tfrom sklearn.preprocessing import LabelBinarizer, LabelEncoder\n\n\t\tX_snt = [[self.word2idx[w] if w in self.word2idx else self.word2idx[self.word_unk_token] for w in s] for s in self.x_document]\n\t\ty_tag = [[self.tag2idx[t]] for t in self.y_document]\n\n\t\tX_snt = pad_sequences(maxlen=self.parameters['max_doc_len'], sequences=X_snt, padding='post', value=self.word2idx[self.word_pad_token])\n\t\ty_tag = to_categorical(y_tag, self.tags_len)\n\n\t\tprint (\"\\tRandom:\\t\", self.random)\n\t\tprint (\"\\tTest size:\\t\", self.split_train_test)\n\n\t\tself.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X_snt, y_tag, test_size=self.split_train_test, random_state=self.random)\n\n\t\tself.X_train = np.array(self.X_train)\n\t\tself.X_test = np.array(self.X_test)\n\t\tself.y_train = np.array(self.y_train)\n\t\tself.y_test = np.array(self.y_test)\n\n\t\tprint ('\\n\\tWords: {}\\t{}'.format(self.X_train.shape, self.X_test.shape) )\n\t\tprint ('\\tTags: {}\\t{}\\n'.format(self.y_train.shape, self.y_test.shape))", "def model(self):\n filePath = self.config['data_path']['train_data']\n data = self.loadCSV(filePath)\n cleandata = self.preprocess(data)\n X, y = self.dataSplit(cleandata)\n X = self.CountVect(X, self.config['transform_path']['transform_model_path'])\n X_train, X_test, y_train, y_test = self.TrainTestSplit(X, y)\n self.MultinomialNB(X_train, X_test, y_train, y_test, self.config['nlp_path']['model_path'])", "def data_word2vec_one_label(input_file,\n word2vec_model):\n vocab = dict([(k, v.index) for (k, v) in word2vec_model.wv.vocab.items()])\n\n def _token_to_index(content):\n result = []\n for item in content:\n word2id = vocab.get(item)\n if word2id is None:\n word2id = 0\n result.append(word2id)\n return result\n\n\n with open(input_file) as fin:\n raw_tokens_list_gov = []\n raw_tokens_list_art = []\n test_id_list = []\n 
content_index_list_gov = []\n content_index_list_art = []\n # labels_list = []\n onehot_labels_list = []\n labels_num_list = []\n total_line = 0\n\n for each_line in fin:\n data = json.loads(each_line)\n ds_art = data['testid']\n ds = ds_art.split(\"_\")[0]\n art = ds_art.split(\"_\")[1][1:-1]\n test_id = ds + \"_\" + art\n features_content_gov = data['gov']\n features_content_art = data['art']\n label = data['label']\n\n test_id_list.append(test_id)\n content_index_list_gov.append(_token_to_index(\n features_content_gov))\n content_index_list_art.append(_token_to_index(\n features_content_art))\n\n raw_tokens_list_gov.append(features_content_gov)\n raw_tokens_list_art.append(features_content_art)\n\n # labels_list.append(label)\n # onehot_labels_list.append(_create_onehot_labels(labels_index,\n # num_labels))\n onehot_labels_list.append(label)\n labels_num = 1\n labels_num_list.append(labels_num)\n total_line += 1\n\n\n class _Data:\n def __init__(self):\n pass\n\n @property\n def number(self):\n return total_line\n\n @property\n def testid(self):\n return test_id_list\n\n @property\n def raw_tokens_gov(self):\n return raw_tokens_list_gov\n\n @property\n def raw_tokens_art(self):\n return raw_tokens_list_art\n\n @property\n def tokenindex_gov(self):\n return content_index_list_gov\n\n @property\n def tokenindex_art(self):\n return content_index_list_art\n\n # @property\n # def labels(self):\n # return labels_list\n\n @property\n def onehot_labels(self):\n return onehot_labels_list\n\n @property\n def labels_num(self):\n return labels_num_list\n\n return _Data()", "def __init__(self, model=\"glove.840B.300d.txt\", dictionary=\"words.txt\", pattern=\"^[a-z][a-z-]*[a-z]$\"):\n\n # Keep unique words matching pattern from file\n words = set()\n with open(dictionary, \"r\", encoding=\"utf8\") as f:\n for line in f:\n if re.match(pattern, line):\n words.add(line.rstrip(\"\\n\"))\n\n # Join words with model\n vectors = {}\n with open(model, \"r\", encoding=\"utf8\") as f:\n for line in f:\n tokens = line.split(\" \")\n word = tokens[0]\n if word in words:\n vector = numpy.asarray(tokens[1:], \"float32\")\n vectors[word] = vector\n self.vectors = vectors", "def __init__(self, model_name):\n\n self.name = model_name\n self.words = {}\n self.word_lengths = {}\n self.stems={}\n self.sentence_lengths={}\n self.endings={}\n self.total = 0", "def __init__(self, model_name):\n self.name = model_name\n self.words = {}\n self.word_lengths = {}\n self.stems = {}\n self.sentence_lengths = {}\n self.punctuation = {}", "def __init__(self, model, data=None, labels=None):\n\t\tif data is None or labels is None:\n\t\t\traise AttributeError(\"No Data in a constructor provided.\")\n\n\n\t\tself.models = {\n\t\t\t\"knn\": KNeighborsClassifier(n_neighbors=9, algorithm=\"brute\", weights=\"distance\"),\n\t\t\t\"naive_bayes\": GaussianNB(),\n\t\t\t\"svm\": SVC(C=15.6, gamma=\"scale\", kernel=\"rbf\"),\n\t\t\t\"decision_tree\": DecisionTreeClassifier(criterion=\"entropy\", max_depth=55, splitter=\"best\"),\n\t\t\t\"random_forest\": RandomForestClassifier(n_estimators=50, criterion=\"entropy\"),\n\t\t\t\"extra_tree\": ExtraTreesClassifier(n_estimators=122, criterion=\"entropy\"),\n\t\t\t\"gradient_boost\": GradientBoostingClassifier(n_estimators=33, learning_rate=0.14),\n\t\t\t\"mlp\": MLPClassifier(solver=\"lbfgs\", hidden_layer_sizes=(13, 12), alpha=5E-06)\n\n\t\t}\n\n\t\tself.le = LabelEncoder()\n\t\tself.model = self.models[model]\n\n\t\tself.training_data = data\n\t\tself.training_labels = 
self.le.fit_transform(labels)\n\t\tself.feature_names = ['EARL','L1','L2','L3', 'EARR', 'R1', 'R2', 'R3', 'MAR', 'M1', 'M2', 'M3', 'M4']\n\t\tself.feature_mask = [True,True,True,True,True,True,True,True,True,True,True,True,True]", "def __init__(self, model_name):\r\n self.name = model_name\r\n self.words = ({})\r\n self.word_lengths = ({})\r\n self.stems = ({})\r\n self.sentence_lengths = ({})\r\n self.punctuation = ({})", "def from_raw(\n cls,\n data_file: str,\n tokenizer: Tokenizer,\n processor: TextProcessor,\n word_vectors: WordVectors,\n force_single_answer: bool = False,\n char_mapping: Optional[Dict[str, int]] = None,\n ) -> \"Corpus\":\n context_qas = cls.read_context_qas(\n data_file, tokenizer, processor, force_single_answer\n )\n\n def return_one() -> int:\n return 1\n\n token_mapping = defaultdict(return_one, word_vectors.word_to_idx)\n if char_mapping is None:\n char_mapping = cls.compute_char_indices(context_qas)\n stats = cls.compute_stats(context_qas, token_mapping, char_mapping)\n return cls(context_qas, token_mapping, char_mapping, stats, data_file)", "def train(self):\n # >>> YOUR ANSWER HERE\n\n fake_docs = []\n fake_words = []\n fake_words_freq = {}\n real_docs = []\n real_words = []\n real_words_freq = {}\n\n # load fake data of the training dataset, store the docs and words\n fake_data = open(self.train_data['fake']).readlines()\n for sentence in fake_data:\n preprocess_sentence = sentence.strip()\n fake_docs.append(preprocess_sentence)\n fake_words.extend(preprocess_sentence.split())\n\n # load real data of the training dataset, store the docs, words and word frequencies.\n real_data = open(self.train_data['real']).readlines()\n for sentence in real_data:\n preprocess_sentence = sentence.strip()\n real_docs.append(preprocess_sentence)\n real_words.extend(preprocess_sentence.split())\n\n # remove stop words if necessary\n if self.REMOVE_STOPWORDS:\n fake_words = [word for word in fake_words if word not in self.stopwords]\n real_words = [word for word in real_words if word not in self.stopwords]\n\n # calculate all words' frequency\n for word in fake_words:\n self.vocabulary.add(word)\n fake_words_freq[word] = fake_words_freq.get(word, 0) + 1\n for word in real_words:\n self.vocabulary.add(word)\n real_words_freq[word] = real_words_freq.get(word, 0) + 1\n\n # pre-calculate the number of all docs, the number of docs per class and words frequency per class for\n # calculation in the training loop.\n n_doc = len(fake_docs) + len(real_docs)\n n_class = {'fake': len(fake_docs), 'real': len(real_docs)}\n big_doc_dict = {'fake': fake_words_freq, 'real': real_words_freq}\n fake_words_num = 0\n real_words_num = 0\n for w in self.vocabulary:\n fake_words_num += fake_words_freq.get(w, 0)\n real_words_num += real_words_freq.get(w, 0)\n words_frequency_per_class = {'fake': fake_words_num, 'real': real_words_num}\n\n # Training\n for c in self.classes:\n self.logprior[c] = math.log(n_class[c] / n_doc)\n for w in self.vocabulary:\n count_w_c = big_doc_dict[c].get(w, 0)\n log_likelihood = math.log((count_w_c + 1) / (len(self.vocabulary) + words_frequency_per_class[c]))\n self.loglikelihood[(w, c)] = log_likelihood\n # >>> END YOUR ANSWER", "def trainingModel4wmd(corpus):\n model = Word2Vec(corpus, workers = nCores, size = 100, window = 300,\n min_count = 2, iter = 250)\n # model = Word2Vec(corpus)\n\n # use the following if we want to normalize the vectors\n model.init_sims(replace=True)\n\n return model", "def parse_input(input_data, dictionary, model):\n vec_text = 
TextBlob(input_data).words.lower().lemmatize()\n vec_bow = dictionary.doc2bow(vec_text)\n return model[vec_bow]", "def load_word2vec_model():\n logging.basicConfig(\n format='%(asctime)s : %(levelname)s : %(message)s', \n level=logging.INFO)\n model_path = '/playpen/home/tongn/GoogleNews-vectors-negative300.bin'\n model = KeyedVectors.load_word2vec_format(fname=model_path, binary=True)\n return model", "def read_model(self):\n filename=self.name + '_words'\n self.words=file_read(filename)\n\n filename2= self.name+'_word_lengths'\n self.word_lengths=file_read(filename2)\n\n filename3=self.name+'_stems'\n self.stems=file_read(filename3)\n\n filename4=self.sentence_lengths+'_sentence_lengths'\n self.setence_lengths=file_read(filename4)\n\n filename5= self.endings+'_endings'\n self.endings=file_read(filename5)", "def load_data(dataset_path, word2vec_model_path, n_class=2, max_seq_len_cutoff=50):\n\n dataset_file = open(dataset_path, \"r\", encoding='utf-8')\n dataset_content = dataset_file.readlines()\n\n x_text = []\n y = []\n for element in dataset_content:\n element = element.lower()\n element = element.split(\"\\t\")\n label = int(element[0])\n text = element[1].strip()\n if (len(text) == 0):\n continue\n x_text.append(text)\n tmp_lable = np.zeros(n_class)\n if(n_class == 2):\n tmp_lable[label] = 1\n else:\n tmp_lable[label - 1] = 1\n y.append(tmp_lable)\n\n\n x_text = clean_str(x_text, max_seq_len_cutoff)\n\n sequence_length = max(len(x) for x in x_text)\n\n vocabulary, vocabulary_inv = build_vocab(x_text)\n y = np.asarray(y)\n\n word2vec_Model = Load_Model(word2vec_model_path)\n word2vec_vocab = word2vec_Model.vocab\n word2vec_vec = word2vec_Model.syn0\n\n print(\"word2vec len is: \", len(word2vec_vec))\n tmp = word2vec_vocab['real']\n tmp1 = copy.deepcopy(tmp)\n word_vector = np.random.uniform(low=-0.25, high=0.25, size=(1,word2vec_vec.shape[1]))\n word2vec_vec = np.append(word2vec_vec, word_vector, axis=0)\n tmp1.index = len(word2vec_vec)-1\n word2vec_vocab['<un_known>'] = tmp1\n\n return [x_text, y, sequence_length, vocabulary, vocabulary_inv, word2vec_vocab, word2vec_vec]", "def retrain_model(self, new_sentences, with_punctiations):\n if with_punctiations:\n model_ = Word2Vec.load('./model/model_word2vec.bin')\n else:\n model_ = Word2Vec.load('./model/model_no_punctuation_word2vec.bin')\n\n model_.build_vocab(new_sentences, update=True)\n model_.train(new_sentences, total_examples=model_.corpus_count, epochs=model_.iter)\n\n if with_punctiations:\n model_.save('./model/model_word2vec.bin')\n else:\n model_.save('./model/model_no_punctuation_word2vec.bin')\n\n\n pass", "def __init__(self, path_to_glove='glove.6B.200d.txt',\n embedding_dim=200, prep_Data_from = 'train', purpose='train_model'):\n assert prep_Data_from in ['train', 'test', 'val']\n assert purpose in ['train_model', 'test_class']\n \n if purpose == 'train_model':\n path_to_train = 'train2.tsv'\n path_to_val = 'val2.tsv'\n path_to_test = 'test2.tsv'\n else:\n path_to_train = 'sample_train.tsv'\n path_to_test = 'sample_test.tsv'\n path_to_val = 'sample_val.tsv'\n\n train_Dataframe = pandas.read_csv(path_to_train, sep='\\t', header=None).dropna()\n test_Dataframe = pandas.read_csv(path_to_test, sep='\\t', header=None).dropna()\n val_Dataframe = pandas.read_csv(path_to_val, sep='\\t', header=None).dropna()\n\n self.embeddings = create_glove_dict(path_to_glove)\n self.embedding_dim = embedding_dim\n self.dataframe = pandas.concat([train_Dataframe, test_Dataframe, val_Dataframe])\n\n self.justification_max = 
get_max_length(self.dataframe, 15)\n self.statement_max = get_max_length(self.dataframe, 3)\n\n if prep_Data_from == 'train':\n self.dataframe = train_Dataframe\n elif prep_Data_from == 'val':\n self.dataframe = val_Dataframe\n elif prep_Data_from == 'test':\n self.dataframe = test_Dataframe\n\n del train_Dataframe, test_Dataframe, val_Dataframe\n\n self.labels = {\"true\": 0,\n \"mostly-true\": 1,\n \"half-true\": 2,\n \"barely-true\": 3,\n \"false\": 4,\n \"pants-fire\": 5}", "def construct_embedding(self):\n i = 0\n self.load_dicts()\n embedding_shape = (max(self.word2idx.values()) + 1,\n self.embedding_size)\n self.embedding = np.zeros(embedding_shape)\n\n with open(self.config.word_vec_fi_glove, 'r') as fi:\n for line in fi:\n word_vec = line.split(\" \")[1:]\n self.embedding[i, :] = np.array(word_vec, dtype=np.float32)\n i += 1\n\n self.write_embedding()", "def load_word2vec_model():\n model = Word2Vec.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True, norm_only=True)\n return model", "def train_model(schema,fieldsToRead = None):\n\tif not fieldsToRead:\n\t\tfieldsToRead = schema[\"fields\"].keys()\n\n\tif(\"vector_size\" in schema):\n\t\tvectorSize = schema[\"vector_size\"]\n\telse:\n\t\tvectorSize = DEFAULT_VECTOR_SIZE\n\n\tsentences = []\n\t# build sentences:\n\tprint \"Building Feature vectors...\"\n\n\tread_sentences(schema, lambda x : sentences.append(merge_sentences_to_single_sentence(x, fieldsToRead)))\n\tprint \"Read \" + str(len(sentences)) + \" documents\"\n\tprint \"Training Model...\"\n\tmodelPath = model_path(schema)\n\tweightMatrixPath = weight_matrix_path(schema)\n\tsentences = transpose_sentences(sentences)\n\tmodel = Word2Vec(sentences, size=vectorSize, window=5, min_count=1, workers=4)\n\tmodel.save(modelPath)\n\tmodel.save_word2vec_format(weightMatrixPath)\n\tprint \"Finished training\"\n\treturn model", "def prepare_length_features(text_counts, custom_vec, length_processed_flora_data_frame):\n vocab = custom_vec.get_feature_names() # https://stackoverflow.com/questions/39121104/how-to-add-another-feature\n # -length-of-text-to-current-bag-of-words-classificati\n\n length_model_data_frame = pd.DataFrame(text_counts.toarray(), columns=vocab)\n length_model_data_frame = pd.concat(\n [length_model_data_frame, length_processed_flora_data_frame['length'].reset_index(drop=True)], axis=1)\n\n length_model_data_frame_values = length_model_data_frame.values.astype(np.float64)\n length_model_sparse = sparse.csr_matrix(length_model_data_frame_values)\n\n assert length_model_sparse.shape > text_counts.shape, 'Length model should have one more column of data than BOW ' \\\n 'model '\n return length_model_sparse", "def create_data_model():\n data = {}\n weights = [48, 30, 42, 36, 36, 48, 42, 42, 36, 24, 30, 30, 42, 36, 36]\n values = [10, 30, 25, 50, 35, 30, 15, 40, 30, 35, 45, 10, 20, 30, 25]\n data['num_items'] = len(weights)\n data['all_items'] = range(data['num_items'])\n data['weights'] = weights\n data['values'] = values\n data['bin_capacities'] = [100, 100, 100, 100, 100]\n data['num_bins'] = len(data['bin_capacities'])\n data['all_bins'] = range(data['num_bins'])\n return data", "def train():\n counts = {size: dict() for size in NGRAM_SIZES}\n for word in tqdm.tqdm(word_iterator(\"resources/datasets\")):\n if word == \"\":\n continue\n for size in NGRAM_SIZES:\n for token in ngrams(word, 2 * size):\n left, right = token[:size], token[size:]\n counts[size].setdefault(left, dict())\n counts[size][left].setdefault(right, 0)\n counts[size][left][right] 
+= 1\n model = {size: dict() for size in NGRAM_SIZES}\n for size in NGRAM_SIZES:\n for left in counts[size]:\n total = sum(counts[size][left].values())\n model[size][left] = dict()\n for right in counts[size][left]:\n model[size][left][right] = math.log(\n counts[size][left][right] / total)\n with open(MODEL_FILENAME, \"wb\") as file:\n pickle.dump(model, file)", "def make_data(docs):\n vocab=make_vocab_from_docs(docs)\n word_dicts=list(map(lambda x: parse_doc(x,vocab),docs))\n Nd=list(map(len,docs))\n M,V=len(docs),len(vocab)\n return (vocab,word_dicts,Nd,M,V)", "def load_data():\r\n from sklearn.feature_extraction.text import CountVectorizer\r\n\r\n # Load the data\r\n\r\n with open(\"clean_real.txt\", 'r') as RealNews:\r\n RealStrAr = RealNews.read().split('\\n')\r\n\r\n with open(\"clean_fake.txt\", 'r') as FakeNews:\r\n FakeStrAr = FakeNews.read().split('\\n')\r\n\r\n # Preprocess it using a vectorizer\r\n\r\n MyCoolVectorizer = CountVectorizer()\r\n X = MyCoolVectorizer.fit_transform(RealStrAr + FakeStrAr)\r\n\r\n RealLabels = np.ones((len(RealStrAr), 1)) # means real\r\n FakeLabels = np.zeros((len(FakeStrAr), 1)) # means fake\r\n AllLabels = np.append(RealLabels, FakeLabels, axis=0)\r\n\r\n FinalTensor = np.append(X.toarray(), AllLabels, axis=1)\r\n\r\n # Randomize it and split it\r\n\r\n np.random.shuffle(FinalTensor)\r\n\r\n # divide and multiply by 2 just to make sure it's even\r\n ROUGHLY70 = 2 * ((FinalTensor.shape[0] * 70 / 100) / 2)\r\n ROUGHLY15 = (FinalTensor.shape[0] - ROUGHLY70) / 2\r\n\r\n # TEST SET VALIDATION SET TRAINING SET DICTIONARY\r\n return (FinalTensor[:ROUGHLY15], FinalTensor[ROUGHLY15 : 2 * ROUGHLY15], FinalTensor[-ROUGHLY70:], MyCoolVectorizer.get_feature_names())", "def __init__(self):\n self.tokenizer = BOWTokenizer(\n English()\n ) # the tokenizer must have a tokenize() and parse() function.\n self.labelEncoder = LabelEncoder()\n self.vectorizer = CountVectorizer(\n tokenizer=self.tokenizer.tokenize, ngram_range=(1, 1)\n )\n self.decode_params = {}", "def load_glove_data():\n glove_path = path.join('..', 'data', 'glove', 'glove.twitter.27B.200d.txt')\n f = open(glove_path,'r')\n \n model = {}\n for line in f:\n splitLine = line.split()\n word = splitLine[0]\n embedding = np.array([float(val) for val in splitLine[1:]])\n model[word] = embedding\n \n return model", "def generate_corpus(model, sample):\r\n \r\n dl_corpus = []\r\n for word in sample:\r\n if word in model:\r\n dl_corpus.append(model[word])\r\n else:\r\n dl_corpus.append([0]*VECTOR_DIM)\r\n\r\n return [dl_corpus]", "def build_data(self, data_folder, cv=10, clean_string=False):\n revs = []\n # pos_file = loadmodel(data_folder[0])\n # neg_file = loadmodel(data_folder[1])\n pos_texts = loadmodel(data_folder[0]).get(\"content\")\n neg_texts = loadmodel(data_folder[1]).get(\"content\")\n vocab = defaultdict(float)\n happyList = [ \":-)\", \":)\", \":D\", \":o)\", \":]\", \":3\", \":c)\", \":>\", \"=]\", \"8)\", \"=)\", \":}\", \":^)\", \":?)\", \":-)\", \": )\", \": D\", \": o)\", \":]\", \": 3\", \":c)\", \":>\", \"= ]\", \"8 )\", \"= )\", \": }\", \":^)\", \":?)\" ]\n sadList = [ \">:[\", \":-(\", \":(\", \":-c\", \":c\", \":-<\", \":?C\", \":<\", \":-[\", \":[\", \":{\",\">:[\", \":-(\", \": (\", \":-c\", \": c\", \": -<\", \": ?C\", \": <\", \": -[\", \": [\", \": {\" ]\n for line in pos_texts:\n rev = []\n rev.append(line.strip())\n\n if clean_string:\n orig_rev = self.dc.clean_str(\" \".join(rev))\n else:\n orig_rev = \" \".join(rev).lower()\n #print orig_rev\n words = set(orig_rev.split())\n 
for word in words:\n if word in happyList or word in sadList:\n pass\n else:\n vocab[word] += 1\n datum = {\"y\":1,\n \"text\": orig_rev,\n \"num_words\": len(orig_rev.split()),\n \"split\": np.random.randint(0,cv)}\n revs.append(datum)\n\n for line in neg_texts:\n rev = []\n rev.append(line.strip())\n if clean_string:\n orig_rev = self.dc.clean_str(\" \".join(rev))\n else:\n orig_rev = \" \".join(rev).lower()\n words = set(orig_rev.split())\n for word in words:\n if word in happyList or word in sadList:\n pass\n else:\n vocab[word] += 1\n datum = {\"y\":0,\n \"text\": orig_rev,\n \"num_words\": len(orig_rev.split()),\n \"split\": np.random.randint(0,cv)}\n revs.append(datum)\n return revs, vocab", "def __init__(self):\n # parse MSR data\n test_data, sent_dict, pair_dict = parse()\n # word mover model -- take long to load the model!\n wm_model = WMD()\n # copnvert the ID->String dict to ID-> token dict\n candidate_dict = wmd_utils.sent_dict_to_tok_dict(sent_dict)\n wm_model.evaluate_model(candidate_dict, pair_dict)", "def __init__(self, data_file, target_file, base_vocab='!abcdefghijklmnopqrstuvwqxyz'):\n raw_data = readFileIntoArray(data_file)\n raw_targets = readFileIntoArray(target_file)\n\n \"\"\" Initialize the underlying vocabulary by assigning vectors to letters \"\"\"\n self.base_vocab = base_vocab # Maybe generate this procedurally\n self.vocab = generateVocabVectors(self.base_vocab)\n\n \"\"\" Convert the targets to a vector \"\"\"\n self.targetTranslate = set(raw_targets)\n optDict = dict(zip(self.targetTranslate, range(0, len(self.targetTranslate))))\n self.targets = np.ndarray([len(raw_targets)])\n for i in range(len(raw_targets)):\n self.targets[i] = optDict[raw_targets[i]]\n self.targets = self.targets.astype(np.int32)\n\n \"\"\" Calculate the max vector length \"\"\"\n # (we won't need this once we fix our underlying chainer model)\n self.max_phrase_len = 0\n for phrase in raw_data:\n if (len(phrase) > self.max_phrase_len):\n self.max_phrase_len = len(phrase)\n self.max_vector_len = self.max_phrase_len * len(self.base_vocab)\n\n \"\"\" Convert data to vectors \"\"\"\n k = []\n for phrase in raw_data:\n k.append(stringToVector(phrase, self.vocab, self.max_vector_len))\n self.data = np.asarray(k)\n\n \"\"\" Do not yet initialize the trainer -- we can retrain it later. 
\"\"\"\n self.trainer = None", "def train_model(self, text, labels):\n clf = svm.SVR()\n count_vect = CountVectorizer()\n tfidf_transformer = TfidfTransformer()\n counts = count_vect.fit_transform(text)\n tfidf = tfidf_transformer.fit_transform(counts)\n clf.fit(tfidf, labels)\n\n return clf, count_vect, tfidf_transformer", "def custom_training(nb_tweet_sample, randomised, equal_pos_neg, language, name_kernel, Resource, keep_null_vector):\n m_features, m_labels = get_characteristic_label_vectors(nb_tweet_sample, randomised, equal_pos_neg, Resource,\n keep_null_vector, language)\n\n kernel = Kernel.get_correct_kernel(name_kernel)\n custom_SVM = SVM(kernel)\n custom_SVM.fit(m_features, m_labels)\n\n return custom_SVM", "def prepare_data(self, context_size, model_name):\n self.context_size = context_size\n data_x = []\n data_y = []\n oob = self.word2idx['OOB']\n\n for item in self.docs:\n data = [oob] * context_size + self.doc2token(item) + [oob] * context_size #padding\n for i in range(context_size, len(data) - context_size):\n data_x.append(data[i - context_size: i] + data[i + 1: i + context_size + 1])\n data_y.append(data[i])\n \n if model_name.lower() == 'skipgram':\n data_x, data_y = data_y, data_x\n self.data_x = Variable(torch.LongTensor(data_x))\n self.data_y = Variable(torch.LongTensor(data_y))\n logging.info(f'data preprocessed, data shape: {self.data_x.shape}, {self.data_y.shape}')", "def fasttext_wordvectors(corpus_path, model_path):\n model = fasttext.train_unsupervised(corpus_path)\n model.save_model(model_path)\n return model", "def construct_embedding(captions, cbow=True):\n\n # List of characters to filter out of the caption string\n chars_to_remove = [\"\\n\", \">\", \"--\"]\n\n for char in chars_to_remove:\n captions = captions.replace(char, \" \")\n\n # Perform some necessary tokenization\n data = [word_tokenize(word.lower()) for word in sent_tokenize(captions)]\n\n # Filter out punctuation from the data\n data_minus_punctuation = remove_punctuation(data)\n\n # Filter out stop words from the data\n data_minus_stop_words = remove_stop_words(data_minus_punctuation)\n\n # Filter out any surviving single character list elements\n fully_formatted_data = [\n [word for word in item if len(word) > 1] for item in data_minus_stop_words\n ]\n\n # Train the Word2Vec model using the specified architecture\n if cbow:\n print(\"UTILIZING CBOW ARCHITECTURE\")\n model = Word2Vec(fully_formatted_data, min_count=1, size=100, window=5)\n else:\n print(\"UTILIZING SKIPGRAM ARCHITECTURE\")\n model = Word2Vec(fully_formatted_data, min_count=1, size=100, window=5, sg=1)\n\n return model", "def __init__(self, word2vec_model):\n self._model = word2vec_model", "def build_model():", "def get_data_and_labels(raw_data, categories):\n data = []\n for d in raw_data:\n # filter out the categories we don't care about\n d['categories'] = [c for c in d['categories'] if c in categories]\n if d['categories']:\n data.append(d)\n vectorizer = TfidfVectorizer(tokenizer=tokenize, stop_words='english')\n raw_labels = [d['categories'] for d in data]\n data = vectorizer.fit_transform([d['text'] for d in data])\n mlb = MultiLabelBinarizer()\n labels = mlb.fit_transform(raw_labels)\n return data, labels, vectorizer, mlb", "def build_model():\n \n #english trained optimized pipeline for word embedding\n nlp = spacy.load(\"en_core_web_md\") # this model will give you 300D\n \n pipeline = Pipeline([\n ('features', FeatureUnion([\n ('text_pipeline', Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', 
TfidfTransformer()),\n ])),\n \n ('embeddings_pipeline', Pipeline([\n ('vect_trans',SpacyVectorTransformer(nlp)),\n ('reduce_dim', TruncatedSVD(50)),\n ])),\n \n ])),\n \n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n \n parameters = {\n 'features__text_pipeline__vect__max_df': (0.5, 0.75, 1.0),\n 'features__embeddings_pipeline__reduce_dim__n_components':(50,60,70,100,120,130,150)\n }\n cv = GridSearchCV(pipeline, param_grid=parameters,cv=2)\n \n return cv", "def _train(cls, ds):\n\n tf = TfidfVectorizer(analyzer='word',\n ngram_range=(1, 3),\n min_df=0,\n stop_words='english')\n tfidf_matrix = tf.fit_transform(ds['description'])\n with open('ds.pickle', 'wb') as fp:\n pickle.dump(ds, fp)\n\n with open('tfidf_matrix.pickle', 'wb') as fp:\n pickle.dump(tfidf_matrix, fp)\n\n with open('tf_model.pickle', 'wb') as fp:\n pickle.dump(tf, fp)", "def transform(self, raw_documents):\n if isinstance(raw_documents, str):\n raise ValueError(\n \"Iterable over raw text documents expected, string object received.\"\n )\n if not hasattr(self, \"_vect\"):\n raise ValueError(\"Model need to be fitted first!\")\n\n if not hasattr(self, \"vocabulary_\"):\n self._validate_vocabulary()\n\n self._check_vocabulary()\n\n # use the same matrix-building strategy as fit_transform\n indices, indptr, data = self._vect.transform(raw_documents)\n n_features = self._vect.get_n_features()\n X = sp.csr_matrix((data, indices, indptr), shape=(len(indptr) - 1, n_features))\n\n if self.binary:\n X.data.fill(1)\n return X", "def build_model(self, text, n = 3):\n \n try:\n self.lm.build_model(text,n)\n except:\n raise\n \n self.vocab = Counter(words(text))\n\n return self.lm", "def apply_model(data, ids):\n similar_vectors = load_model(data, ids)\n return similar_vectors", "def create_tdm(cls):\n X = cls.vectorizer.fit_transform(cls.processed_documents) # Convert the X as transposed matrix\n X = X.T.toarray() # Create a DataFrame and set the vocabulary as the index\n cls.df_tdm = pd.DataFrame(X, index=cls.vectorizer.get_feature_names())", "def load(self):\n\n x = [] # input documents (n_docs, max_seq_len)\n labels = [] # targets we are predicting for each input\n\n for file_path in glob.glob(self.train_dir + '*.txt'):\n tokens = read_tokens(file_path)\n unique = list(set(tokens))\n x_count = round(len(unique) * 0.85)\n\n for _ in range(self.samples_per_doc):\n random.shuffle(unique)\n x.append(' '.join(unique[:x_count]))\n labels.append(' '.join(unique[x_count:]))\n\n # make x and y\n pkl = open('Model/tokenizer.p', 'rb')\n self.tokenizer = pickle.load(pkl)\n x = self.tokenizer.texts_to_matrix(x, mode='binary')\n y = self.tokenizer.texts_to_matrix(labels, mode='binary')\n\n # column zero is empty\n return x, y[:,1:]", "def __init__(self, words, infile=None, cloudstore=False):\n #if pre-defined model is provided, use that as probabilities\n if infile:\n if cloudstore:\n url = infile\n wget.download(url, '/tmp/model-weights.pkl')\n with open('/tmp/model-weights.pkl', 'rb') as main_dict:\n self.probas = pickle.load(main_dict)\n else:\n with open(infile, 'rb') as main_dict:\n self.probas = pickle.load(main_dict)\n \n #otherwise if no model is provided, we need to compute our entire model\n else:\n # Raw trigram counts over the corpus. 
\n # c(w | w_1 w_2) = self.counts[(w_2,w_1)][w]\n self.counts = defaultdict(lambda: defaultdict(lambda: 0.0))\n\n # Iterate through the word stream once.\n w_1, w_2 = None, None\n for word in words:\n if w_1 is not None and w_2 is not None:\n # Increment trigram count.\n self.counts[(w_2,w_1)][word] += 1\n # Shift context along the stream of words.\n w_2 = w_1\n w_1 = word\n \n # Normalize so that for each context we have a valid probability\n # distribution (i.e. adds up to 1.0) of possible next tokens.\n self.probas = defaultdict(lambda: defaultdict(lambda: 0.0))\n for context, ctr in self.counts.items():\n self.probas[context] = normalize_counter(ctr)", "def __init__(self, docs, dict_path= 'wordindex.npy'):\n super(NNModel, self).__init__()\n self.stopwords += self.additional_stopwords\n self.words = set(['OOB', 'UNK']) # OOB for out of boundary, UNK for unknown words\n self.docs = []\n\n for doc in docs:\n datum = []\n for word in self.cut_words(doc):\n self.words.add(word)\n datum.append(word)\n self.docs.append(datum)\n\n self.words = list(self.words)\n self.word2idx = dict([(self.words[i], i) for i in range(len(self.words))])\n logging.info(f'{len(docs)} articles loaded, with word bag length: {len(self.words)}')\n if dict_path != '': # save dict\n np.save(DATA_DIR + dict_path, self.word2idx)", "def create_train_label_tokenizer(data):\n data.label_tokenizer = Tokenizer(\n filters=\"\", split=data.arg.split, oov_token=\"<unk>\"\n )\n\n # open training data file\n input_file_path = os.path.join(data.full_train_path, data.arg.input_file)\n with open(input_file_path, encoding=data.arg.encode_mode) as fin:\n\n # store each line (ending with \\n) in a list of list of\n lines = fin.readlines()\n lines = [line.strip().lower().split(\"\\t\") for line in lines]\n try:\n _, _, intent = zip(*lines)\n except Exception:\n print(lines)\n raise FileNotFoundError(\"The input training data file is invalid!\")\n\n # Updates internal {index: word} and {index: doc} vocabularies\n # based on the list of utterances, their IOB tags and their intent label\n data.label_tokenizer.fit_on_texts(intent)\n\n # record acquired knowledge\n with open(\"./knowledge\", 'w') as file: \n file.write(\"Learnt intents from training corpus:\\n\\n\")\n for key, value in data.label_tokenizer.index_word.items():\n file.write(f\"{key} : {value}\\n\")\n return data", "def build_dataset(words):\n count = []\n # count.extend(collections.Counter(words).most_common(n_words - 1))\n count.extend(collections.Counter(words).most_common())\n dictionary = dict()\n for word, _ in count:\n dictionary[word] = len(dictionary)\n data = list()\n # unk_count = 0\n for word in words:\n index = dictionary.get(word, 0)\n # if index == 0: # dictionary['UNK']\n # unk_count += 1\n data.append(index)\n # count[0][1] = unk_count\n reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n data = [data[::2],data[1::2]]\n new_data = list()\n for i in range(len(data[0])):\n new_data.append([data[0][i],data[1][i]])\n data = new_data\n vocabulary_size = len(dictionary)\n print(\"\\n\\ndictionary size = \")\n print(len(dictionary))\n return data, count, dictionary, reversed_dictionary, vocabulary_size", "def build_model(self):\n doc_input = Input(shape=(self.max_sent_num ,self.max_sent_length,512), dtype='float32')\n doc_in=Flatten()(doc_input)\n \n #masked3=Masking(mask_value=Special_value)(doc_input)\n \n # self.model_sent = self.build_sent_encoder()\n \n # doc_encoder= TimeDistributed(self.model_sent)(doc_in)\n \n # document_att= 
self.build_doc_encoder(doc_encoder)\n dense= Dense(DENSE_SIZE,activation='softmax')(doc_in)\n #doc_att = self.build_sent_encoder(sent_encoder)\n # dense the output to 2 because the result is a binary classification.\n output_tensor = Dense(3, activation='softmax', name='classification')(dense)\n # Create Sentence-level Model\n self.model = Model(doc_input, output_tensor)", "def __init__(self, loader, model, config):\n\n # objects\n\n self.config = config\n self.model = model\n\n try:\n self.words = loader.words\n self.linked = loader.linked\n except:\n raise Exception(\"Not text descriptions loaded\")\n\n self.vectors = self.embed(loader, model)\n self.Z = self.link()\n self.counts = self.count()", "def training(train_data, dev_data, param):\n text_to_vec = TextToVec(**param)\n\n # Fit with both train and dev data\n text_to_vec.fit(train_data['data'] + dev_data['data'])\n word_vec_map = text_to_vec.vectorizer.get_feature_names()\n train_vec = text_to_vec.transform(train_data['data'])\n dev_vec = text_to_vec.transform(dev_data['data'])\n logger.info(f\"train vec size:{train_vec.shape}, dev vec size:{dev_vec.shape}\")\n\n # # apply weights on tfidf based on whether the word appear in multiple classes\n # tt_occ = Counter(train_data['encoded_label'])\n # weight_list = []\n # for i in range(train_vec.shape[1]): # For every feature\n # occ = Counter(train_data['encoded_label'][train_vec[:, i] > 0.0])\n # for key, value in occ.items():\n # occ[key] = value/tt_occ[key]\n # weight_list.append(np.std(list(occ.values()))/0.35)\n # weight = np.array(weight_list).reshape(1, -1)\n # weight = weight/np.max(weight)\n # train_vec = np.multiply(train_vec, weight)\n\n # Perform oversampling on training data\n if param['balanced'] not in ['Bootstrap', 'Handsample']:\n logger.info(f\"class info before resampling: {sorted(Counter(train_data['encoded_label']).items())}\")\n train_vec, train_data['encoded_label'] = resample(X_train=train_vec, y_train=train_data['encoded_label'], balance=param['balanced'])\n logger.info(f\"class info after resampling:{sorted(Counter(train_data['encoded_label']).items())}\")\n\n # Fit model\n if param['classifier'] == 'MultinomialNB':\n clf = MultinomialNB()\n elif param['classifier'] == 'LDA':\n clf = LinearDiscriminantAnalysis()\n else:\n clf = svm.LinearSVC()\n\n if param['multiclass'] == 'OnevsOne':\n model = OneVsOneClassifier(clf)\n else:\n model = OneVsRestClassifier(clf)\n\n if param['classifier'] == 'LinearSVM' or param['multiclass'] == 'OnevsOne':\n logger.info(f'Fitting model: {param}')\n model = model.fit(train_vec, train_data['encoded_label'])\n train_prediction = model.predict(train_vec)\n dev_prediction = model.predict(dev_vec)\n else:\n logger.info(f'Fitting model: {param}')\n model = model.fit(train_vec, train_data['binary_label'])\n train_prediction = np.argmax(model.predict(train_vec), axis=1)\n dev_prediction = np.argmax(model.predict(dev_vec), axis=1)\n\n\n return train_prediction, dev_prediction, train_vec.shape, dev_vec.shape, model, word_vec_map", "def generate_limittedmodel():\r\n print('Loading model')\r\n model = KeyedVectors.load_word2vec_format(BIN_NAME, binary=True)\r\n print('Model loaded!')\r\n\r\n print('Loading dot products')\r\n dp = np.load(DP_NAME)\r\n print('Dot products loaded')\r\n\r\n print('Filtering vocab')\r\n for name, vocab in list(model.vocab.items()):\r\n if dp[vocab.index] < MAX_DEGREE:\r\n del model.vocab[name]\r\n\r\n il = list(model.vocab.items())\r\n print('Sorting vocab')\r\n il.sort(key=lambda x: x[1].index)\r\n\r\n # Find the 
indexes of the words that are being kept\r\n print('Generating indexes')\r\n indexes = []\r\n for i in range(0, len(il)):\r\n name, vocab = il[i]\r\n indexes.append(vocab.index)\r\n model.vocab[name].index = i\r\n\r\n print('Modifying model weights')\r\n model.syn0 = model.syn0[indexes]\r\n\r\n print('Saving file')\r\n model.save_word2vec_format(SAVE_NAME, binary=True)", "def from_dataset(cls, dataset, columns=None, freq_range=None, top_k=None, special_tokens=None,\n special_first=True):\n\n vocab = Vocab()\n if columns is None:\n columns = []\n if not isinstance(columns, list):\n columns = [columns]\n if freq_range is None:\n freq_range = (None, None)\n if special_tokens is None:\n special_tokens = []\n root = copy.deepcopy(dataset).build_vocab(vocab, columns, freq_range, top_k, special_tokens, special_first)\n for d in root.create_dict_iterator(num_epochs=1):\n if d is not None:\n raise ValueError(\"from_dataset should receive data other than None.\")\n return vocab", "def load_data():\n # Load and preprocess data\n sentences, labels = load_data_and_labels()\n sentences_padded = pad_sentences(sentences)\n vocabulary, vocabulary_inv = build_vocab(sentences_padded)\n x, y = build_input_data(sentences_padded, labels, vocabulary)\n return [x, y, vocabulary, vocabulary_inv]", "def __init__(self, data, m=100, eta=0.1, seq_length=25, sigma= 0.01):\n\n self.m, self.eta, self.seq_length = m, eta, seq_length\n self.vocab_len = data['vocab_len']\n self.ind_to_char = data['ind_to_char']\n self.char_to_ind = data['char_to_ind']\n self.book_data = data['book_data']\n\n self.b = np.zeros((m, 1))\n self.c = np.zeros((self.vocab_len, 1))\n\n self.U = np.random.normal(0, sigma, size=(m, self.vocab_len))\n self.W = np.random.normal(0, sigma, size=(m, m))\n self.V = np.random.normal(0, sigma, size=(self.vocab_len, m))", "def build_train_data(self,data_folder, cv=10, clean_string=False):\n revs = []\n\n vocab = defaultdict(float)\n print data_folder\n with codecs.open( data_folder, 'rb') as fi:\n for line in fi.readlines():\n line = line.decode('utf-8')\n parts = line.split(\"\\n\")[0].split(\"\\t\")\n if len(parts) > 1:\n sent = parts[1]\n rev = []\n rev.append(sent.strip())\n\n if clean_string:\n orig_rev = self.dc.clean_str(\" \".join(rev))\n else:\n orig_rev = \" \".join(rev).lower()\n #print orig_rev\n words = set(orig_rev.split())\n for word in words:\n vocab[word.lower()] += 1\n if len(orig_rev.split()) < 50 :\n\n datum = {\"y\":int(parts[0]),\n \"text\": orig_rev,\n \"num_words\": len(orig_rev.split()),\n \"split\": np.random.randint(0,cv)}\n revs.append(datum)\n # else:\n # print orig_rev\n\n\n return revs, vocab", "def __init__(self, center_words, context_words, neg_samples): \n self.center_words = center_words\n self.context_words = context_words\n self.neg_samples = neg_samples\n # The index of the data the batch should start from. 
\n self.data_index = 0", "def __init__(self, tokens):\n self.mdl = self.train(tokens)", "def __init__(self, tokens):\n self.mdl = self.train(tokens)", "def train(self, examples):\n print(examples)\n # first we will do gensim to get word embeddings\n tokens = []\n for example in examples:\n for tuple in example:\n tokens.append([tuple[0]])\n self.model = Word2Vec(tokens, min_count=1, size=100).wv\n # shuffle the examples so that they are gone through 'randomly'\n #print(examples)\n random.shuffle(examples)\n #print(examples)\n # iterate through our examples\n for j in range(len(examples)):\n # the stored label for the previous token\n prev_label = None\n prev_word = None\n # iterate through our tokens for the example\n for i in range(len(examples[j])):\n # store our token and its label\n token = examples[j][i][0]\n y = examples[j][i][1]\n # get the features for our current token\n next_word = None\n if i <= (len(examples)-1):\n next_word = examples[j][i+1][0]\n features = self.featurize(prev_label, prev_word, token, next_word)\n # set our previous label to our current since\n # we are done featurizing and need to store it for\n # the next iteration\n prev_label = y\n # a dictionary that will store our z values\n z = {}\n # calculate our z value for every state for\n # the example we are on\n # z(state) = features * weights\n # z[state] = np.dot(features, weights[state])\n for state in self.states:\n z[state] = np.dot(features, self.weights[state])\n # store our max\n max = -1\n # store our y_hat\n y_hat = None\n # store our probabilities\n prob = {}\n # this runs softmax on our z's\n # y_hat = softmax(z)\n denom = sum(np.exp(np.array(list(z.values()))))\n for state in self.states:\n # softmax = p(state) = e^z[state] / (sum[e^z for all z's)\n # making sure this works the way I want it to, should\n # be three values\n #print(np.array(list(z.values())))\n #print(np.exp(np.array(list(z.values()))))\n prob[state] = np.exp(z[state]) / denom\n # if our current prob is greater than the others then it is our boy\n if prob[state] > max:\n # save the new prob as the max\n max = prob[state]\n # save the state as our prediction y_hat\n y_hat = state\n # this will hold our gradients for all the states\n gradients = {}\n for state in self.states:\n # gradient[state] = ((y_hat == state) - prob[state]) * features\n gradients[state] = ((y_hat == state) - prob[state]) * features\n # weights[state] -= loss * gradients\n self.weights[state] -= self.loss * gradients[state]", "def get_summary_model(processed_text, model_type, number_topics):\n\n if model_type == 'LDA':\n count_model = CountVectorizer(ngram_range=(1, 1)).fit(processed_text)\n return count_model, LDA(n_components=number_topics, learning_method='batch').fit(count_model.fit_transform(processed_text))\n if model_type == 'LSA':\n tf_idf_model = TfidfVectorizer(ngram_range=(1, 1)).fit(processed_text)\n return tf_idf_model, TruncatedSVD(n_components=number_topics, algorithm='randomized', n_iter=100, random_state=122).fit(tf_idf_model.transform(processed_text))\n else:\n tf_idf_model = TfidfVectorizer(ngram_range=(1, 1)).fit(processed_text)\n return tf_idf_model, NMF(n_components=number_topics, init='random', random_state=0).fit(tf_idf_model.transform(processed_text))", "def sample_handling(sample, lexicon, classification):\n\n # We have a list of lists [.... [ [0, 2, 1, 0, 0, ..., 0] [1] ] , .... 
] with the bag of words and the class\n featureset = []\n\n # Open the sample text and parse through the document and generate feastures.\n with open(sample, 'r') as f:\n contents = f.readlines()\n for l in contents[:hm_lines]:\n current_words = word_tokenize(l.lower())\n current_words = [lemmatizer.lemmatize(i) for i in current_words]\n features = np.zeros(len(lexicon))\n for word in current_words:\n if word.lower() in lexicon:\n index_value = lexicon.index(word.lower())\n features[index_value] = 1\n features = list(features)\n featureset.append([features, classification])\n\n return featureset", "def basic_model_init(model_args, task_infos, tokenizer):\n config = AutoConfig.from_pretrained(\n model_args.model_name_or_path,\n num_labels=task_infos.num_labels,\n cache_dir=model_args.model_cache_dir,\n id2label=task_infos.id2label,\n label2id=task_infos.label2id,\n )\n model_cls = getattr(mod, model_args.architectures,\n AutoModelForSequenceClassification)\n model = model_cls.from_pretrained(\n model_args.model_name_or_path,\n config=config,\n cache_dir=model_args.model_cache_dir,\n )\n if model.config.vocab_size < len(tokenizer):\n print(\"resize...\")\n model.resize_token_embeddings(len(tokenizer))\n return model", "def get_instance_from_words(data):\n inst = Dataset.get_instance_template()\n inst[\"words\"] = data\n return inst", "def train_word2vec(self, size = 50, window = 20, min_count = 5, epochs = 40):\n\n\n # Read the entire previous data for training\n full_data = pd.read_csv(self.path_full_data, encoding = \"ISO-8859-1\")\n\n # Also read the column which we are performing analysis for\n col_data = pd.read_csv(self.path_data_col\n , encoding = \"ISO-8859-1\"\n , usecols = [self.id_col_name, self.col_name])\n \n\n # Clean the data in the column\n col_data[self.col_name] = self.cln.clean(col_data[self.col_name], typo = self.typo_ind)\n col_data.replace(np.nan, '', inplace = True)\n col_name_list = list(col_data[self.col_name].apply(lambda x: str(x).split(' ')))\n\n\n # Make a list of lists of the data\n input_list = list(full_data['response'].apply(lambda x: x.split(' ')))\n input_list = input_list + col_name_list\n\n # Remove the responses having only one or two words\n input_list = [x for x in input_list if len(x) > 1]\n\n # Build vocabulary and train model\n model = gensim.models.Word2Vec(\n input_list,\n size = size,\n window = window,\n min_count = min_count)\n\n model.train(input_list, total_examples = len(input_list), epochs = epochs)\n\n return model", "def word2vec_model(sentences, size=100, min_count=5, window=5,\n negative=5, cbow=True, iterations=5, seed=0,\n workers=1):\n if cbow is True:\n sg = 0\n else:\n sg = 1\n model = Word2Vec(size=size, window=window,\n min_count=min_count, workers=workers,\n sg=sg, negative=negative, seed=seed)\n\n model.build_vocab(sentences)\n\n model.train(sentences, total_examples=model.corpus_count,\n epochs=iterations)\n return model", "def build_model_mobilenet(num_classes):", "def construct(data_dir, fname, X=None, normalize=False, _type='sparse'):\n if _type == 'sparse':\n return SparseFeatures(data_dir, fname, X, normalize)\n elif _type == 'dense':\n return DenseFeatures(data_dir, fname, X, normalize)\n elif _type == 'sequential':\n return SequentialFeatures(data_dir, fname, X)\n else:\n raise NotImplementedError(\"Unknown feature type\")", "def train(self, train_data):\n with open(train_data, 'r') as train_data:\n while True:\n tokens = train_data.readline().split()\n pos = train_data.readline().split()\n labels = 
train_data.readline().split()\n if not tokens or not pos or not labels:\n break\n # Generate transition probabilities\n for i in range(0, len(labels) - self.N_VALUE + 1):\n self.add_label_sequence(labels[i:i + self.N_VALUE])\n # Generate lexical generation probabilities\n for i in range(0, len(tokens)):\n token = tokens[i].lower()\n label = labels[i]\n self.add_word_tag(token, label)\n self.handle_unknowns()", "def test_RecurrentNeuralNetwork_build_classification() -> None:\n vectorizer = Vectorizer('glove.6B.50d.txt')\n input_shape = {\n 'pos': (len(vectorizer.pos2index), 10),\n 'shape': (len(vectorizer.shape2index), 2)\n }\n rnn = RecurrentNeuralNetwork.build_classification(vectorizer.word_embeddings, input_shape, 1)\n assert isinstance(rnn._model, Model)", "def get_train_data(self, train_data):\n X = []\n Y = []\n\n # word 2 indices and tag 2 indices\n w2i = {} # word to index\n c2i = {} # char to index\n tag2idx = {} # tag2idx\n\n w2i[\"_UNK\"] = 0 # unk word / OOV\n c2i[\"_UNK\"] = 0 # unk char\n c2i[\"<w>\"] = 1 # word start\n c2i[\"</w>\"] = 2 # word end index\n \n \n num_sentences=0\n num_tokens=0\n for instance_idx, (words, tags) in enumerate(read_conll_file(train_data)):\n instance_word_indices = [] #sequence of word indices\n instance_char_indices = [] #sequence of char indices\n instance_tags_indices = [] #sequence of tag indices\n\n for i, (word, tag) in enumerate(zip(words, tags)):\n\n # map words and tags to indices\n if word not in w2i:\n w2i[word] = len(w2i)\n instance_word_indices.append(w2i[word])\n\n if self.c_in_dim > 0:\n chars_of_word = [c2i[\"<w>\"]]\n for char in word:\n if char not in c2i:\n c2i[char] = len(c2i)\n chars_of_word.append(c2i[char])\n chars_of_word.append(c2i[\"</w>\"])\n instance_char_indices.append(chars_of_word)\n\n if tag not in tag2idx:\n tag2idx[tag]=len(tag2idx)\n\n instance_tags_indices.append(tag2idx.get(tag))\n\n num_tokens+=1\n\n num_sentences+=1\n\n X.append((instance_word_indices, instance_char_indices)) # list of word indices, for every word list of char indices\n Y.append(instance_tags_indices)\n\n\n print(\"%s sentences %s tokens\" % (num_sentences, num_tokens), file=sys.stderr)\n print(\"%s w features, %s c features \" % (len(w2i),len(c2i)), file=sys.stderr)\n if self.c_in_dim == 0:\n print(\"char features disabled\", file=sys.stderr)\n\n assert(len(X)==len(Y))\n\n # store mappings of words and tags to indices\n self.set_indices(w2i, c2i, tag2idx)\n\n return X, Y", "def _read(self, documents):\n data = []\n X,Y = [], []\n for document in documents:\n d_ata = pd.read_csv(document, sep='\\t', names=['review','label'])\n data.append(d_ata)\n data = pd.concat(data)\n self.data = data\n Y = data.label\n self.vec.fit(data.review)\n X = self.preprocess(data)\n \n return train_test_split(X,Y)", "def word_vecs(self, raw_label=False):\n utterances, labels = self.read_json()\n # print(utterances)\n # print(self.label_dict)\n utterances = [self.word2vec(u) for u in utterances]\n if raw_label:\n labels = labels\n else:\n labels = [self.label_dict[l] for l in labels]\n\n return utterances, labels", "def read_word2vec_model():\n file_name = \"word2vec_model.txt\"\n # these are the pre-2018 lines to load a model:\n # from gensim.models.word2vec import Word2Vec\n # m = Word2Vec.load_word2vec_format(file_name, binary=False)\n \n # here are the post-2018 lines to load a model:\n from gensim.models import KeyedVectors\n print(\"Starting to load the model in \", file_name, \"...\")\n m = KeyedVectors.load_word2vec_format(file_name, binary=False)\n 
print(\"Model loaded.\\n\")\n\n print(\"The model built is\", m, \"\\n\")\n print(\"m.vocab has\", len(m.vocab), \"words\")\n ## The above line should print\n ## m.vocab has 43981 words\n\n print(\"Each word is a vector of size\", m.vector_size)\n ## which should tells us that each word is represented by a 300-dimensional vector\n\n print(\"\\nTry m.get_vector('hello') to see one...!\\n\")\n ## Once the model is built, it can't be changed without rebuilding it; we'll leave it. \n\n return m", "def __init__(self, words, labels):\n self.words = words\n self.labels = labels", "def construct_NLP_model(self, df=None):\n import review_processing as rp\n # get words\n if df is not None:\n nitems = df.shape[0]\n col_names = df.columns.values\n if self.review_col_name not in col_names or \\\n self.sentiment_col_name not in col_names:\n sys.exit('construct_NL_model: The name {0}/{1} cannot be found'.\n format(self.review_col_name, self.sentiment_col_name))\n review_list = df[self.review_col_name].values.tolist()\n meaningful_words = map(self.review_to_meaningful_words,\n review_list)\n # Get training sentiment values\n self.sentiment = df[self.sentiment_col_name].values\n\n else:\n if self.training_file_name is None:\n sys.exit('construct_NLP_model: traning file name does not '\n 'exist')\n else:\n suffix = os.path.splitext(self.training_file_name)[1][1:]\n if suffix == 'csv':\n df = pd.read_csv(self.training_file_name)\n if self.review_col_name not in col_names or \\\n self.sentiment_col_name not in col_names::\n sys.exit('construct_NL_model: The name {0}/{1} cannot '\n ' be found'.format(self.review_col_name,\n self.sentiment_col_name))\n nitems = df.shape[0]\n review_list = df[review_col_name].values.tolist()\n meaningful_words = map(self.review_to_meaningful_words,\n review_list)\n elif suffix == 'json':\n data_dict_list = rp.load_data(self.training_file_name)\n if self.review_col_name not in data_dict_list.keys():\n sys.exit('construct_NL_model: The name {0} cannot be '\n 'found'.format(review_col_name))\n review_list = map(lambda x: x[review_col_name],\n data_dict_list)\n meaningful_words = map(self.review_to_meaningful_words,\n review_list)\n else:\n sys.exit('construct_NLP_model: file type not supported '\n 'yet!')\n\n # Training process of Bag of Worlds\n if self.NLP_model == 'BagofWords':\n print('construct_NLP_model: Creating bag of words...')\n self.vectorizer = CountVectorizer(analyzer='word',\n tokenizer=None,\n preprocessor=None,\n stop_words=None,\n max_features=self.maxfeature)\n self.train_data_features = vectorizer.fit_transform(\n meaningful_words)\n self.train_data_features = train_data_features.toarray()\n\n # vocab = vectorizer.get_feature_names()\n # dist = np.sum(train_data_features, axis=0)\n # for tag, count in zip(vocab, dist):\n # print(count, tag)\n\n else:\n sys.exit('construct_NLP_model: NLP_model type not supported yet!')", "def __init__(self):\n self.device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n \n self.model = models.resnet101() \n self.model.load_state_dict(torch.load(WEIGHTS_DIR))\n \n self.model.to(self.device)\n \n self.model.eval()\n \n # labels\n with open(LABELS, 'r') as f:\n self.labels = ast.literal_eval(f.read())\n \n # define data transform\n self.transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])", "def build_vocab(self):\n if self.test_file is None:\n 
print('test_file is None')\n file_list = [self.train_file, self.dev_file]\n else:\n file_list = [self.train_file, self.dev_file, self.test_file]\n\n examples = []\n for file_name in file_list:\n examples += ParseExample.load_data(file_name)\n\n sents = []\n for example in examples:\n warrant0, warrant1, reason, claim, debate_meta_data, negclaim = example.get_six(type=WORD_TYPE)\n sents.append(warrant0)\n sents.append(warrant1)\n sents.append(reason)\n sents.append(claim)\n sents.append(debate_meta_data)\n\n vocab = data_utils.build_word_vocab(sents)\n\n return vocab", "def from_dataset(cls, dataset, col_names, vocab_size, character_coverage, model_type, params):\n\n vocab = SentencePieceVocab()\n root = copy.deepcopy(dataset).build_sentencepiece_vocab(vocab, col_names, vocab_size, character_coverage,\n model_type, params)\n for d in root.create_dict_iterator(num_epochs=1):\n if d is None:\n raise ValueError(\"from_dataset should receive data other than None.\")\n return vocab", "def create_vocab():\n \n cutoff = CUTOFF\n \n lines = open(INFNAME_FORMAT.format(\"train\")).readlines() \\\n + open(INFNAME_FORMAT.format(\"test\")).readlines()\n raw = [process_line(l) for l in lines]\n cntx = Counter( [ w for e in raw for w in e ] )\n vocab = { x for x, y in cntx.items() if y > cutoff }\n \n return vocab", "def __init__(self, model_config, global_config):\n self.messages = []\n self.config = global_config\n\n self.name = model_config[\"name\"] \n self.data_filename = model_config[\"data\"] \n self.clock = model_config.get(\"clock\", \"\")\n self.features = model_config.get(\"features\",[\"*\"])\n self.exclusions = model_config.get(\"exclusions\",None)\n self.constant_feature = False\n self.constant_feature_removed = False\n self.frequencies = model_config.get(\"frequencies\", \"empirical\")\n self.pruned = model_config.get(\"pruned\", False)\n self.rate_variation = model_config.get(\"rate_variation\", False)\n self.feature_rates = model_config.get(\"feature_rates\", None)\n self.ascertained = model_config.get(\"ascertained\", None)\n # Force removal of constant features here\n # This can be set by the user in BinaryModel only\n self.remove_constant_features = True\n self.minimum_data = float(model_config.get(\"minimum_data\", 0))\n self.substitution_name = self.__class__.__name__\n self.data_separator = \",\"\n self.use_robust_eigensystem = model_config.get(\"use_robust_eigensystem\", False)\n\n # Load the entire dataset from the file\n self.data = load_data(self.data_filename, file_format=model_config.get(\"file_format\",None), lang_column=model_config.get(\"language_column\",None))\n # Remove features not wanted in this analysis\n self.build_feature_filter()\n self.apply_feature_filter()\n\n # Keep this around for later...\n self.global_config = global_config", "def train_model(self, d=0.7):\n #eg: model = spammy.train_model()\n count_dict = self.freq_count()\n N = sum(count_dict.values())\n n_plus = len(count_dict)\n alpha = (d * n_plus) / N\n normalizer = alpha * (1/len(self._vocab_set))\n model = defaultdict(lambda:normalizer)\n \n for word in set(self._vocab_set):\n prob = normalizer\n if word in count_dict:\n prob += (count_dict[word] - d) / N\n prob = math.log(prob)\n model[word] = prob\n return model", "def featurize(self, data):\n \n features = []\n\n # tokens = data.split()\n\n #Modification 1: Normalization: All lowercase\n #Removing this did not seem to have any performance boost\n #but it did nothing negative either\n data = data.lower()\n\n #Modification 2: Normalization: 
Tokenizing using NLTK\n #Keep this\n # tokens = word_tokenize(data)\n tokens = data.split()\n\n #Modification 3: Word List: Removing stop words using NLTK\n #Keep this\n stop_words = set(stopwords.words('english'))\n tokens_filtered = []\n\n for t in tokens:\n if t not in stop_words:\n tokens_filtered.append(t)\n\n tokens = tokens_filtered\n\n #Modification 4: Pre-Processing Lemmization using NLTK\n #Surprisingly does not appear to impact performance\n # for t in tokens:\n # t = self.wordnet_lemmatizer.lemmatize(t)\n\n capital = 0\n average_word_length = 5 #It's 4.7, but we'll use 5\n short_words = 0\n long_words = 0\n\n for t in tokens:\n\n #Feature 1: Bag of words\n features.append((t, True))\n\n if(t.isupper()):\n capital += 1\n\n #Feature 3: Long or short word counter, intentionally ignoring length 4\n #and 5 as those are close to average\n #Very important that stop words were removed\n if(len(t) > average_word_length):\n long_words += 1\n elif(len(t) < average_word_length - 1):\n short_words += 1\n \n #Feature 2: Lots of capital\n #Remove this. It only appears to be a rough count of sentence number vs.\n #Capturing any sentiment. Does not impact F1 score in given train/dev sets\n # if(capital > 2):\n # features.append((\"LOTS_OF_CAPITAL\", True))\n\n #Feature 3: Long or short words\n # if(long_words > short_words):\n # features.append((\"LOTS_OF_LONG_WORDS\", True))\n\n\n\n return features", "def load_data():\n dictionary = corpora.Dictionary.load(app.config['DICTIONARY'])\n matrix = similarities.MatrixSimilarity.load(app.config['MATRIX'])\n model = models.LsiModel.load(app.config['MODEL'])\n df = pd.read_pickle(app.config['DATA_FRAME'])\n return Data(matrix=matrix, model=model, dictionary=dictionary, data_frame=df)", "def build_model(self , text, n=3): #should be called build_model\n self.n = n\n self.vocab = Counter(words(text))\n\n tokens=tokenize(text)\n for gram in list(ngrams(tokens,self.n)):\n self.lm_dict[tuple(gram[:-1])][gram[-1]]+=1", "def __init__(self, data_dir, mode):\n data = pd.read_csv(path.join(data_dir, mode+\"_data.txt\"), delimiter='|')\n word2idx = np.load(path.join(data_dir, \"word2idx.dict\"))\n sentences = [sentence.split(' ') for sentence in data['sentence']]\n self.data_len = data['sequence_length'].values\n self.labels = data['sentiment_label'].values\n self.sequence_data = np.array([[word2idx[w] for w in s] for s in sentences])", "def build_model(self, text):\n text = '< ' * (self.n - 1) + text.replace(' . 
', ' .%s ' % (' <' * (self.n - 1))) + ' >'\n tokens = self.split(text)\n self.corpus_len = len(tokens)\n self.n_grams_by_len = [{} for _ in range(self.n)]\n for i in range(len(tokens)): # for index in tokens\n for n in range(self.n): # for n-gram size from 1 to n:\n if i >= n: # if the index has advanced enough for this n\n n_gram = self.join(tokens[i - n: i + 1])\n n_grams = self.n_grams_by_len[n] # get dict for respective n\n n_grams[n_gram] = n_grams.get(n_gram, 0) + 1 # get dict for respective n\n return self.get_model()", "def unigram_representation(data):\r\n vec = CountVectorizer()\r\n vec = vec.fit(data)\r\n return vec", "def build_model(category_names):\n try:\n # initialise columns to be used for data preparation purposes in the model pipeline\n message_col = 0\n\n # build a pipeline containing the feature transformations and estimator\n pipeline = Pipeline([\n\n ('features', ColumnTransformer([\n # apply message transformations\n ('text_pipeline', Pipeline([\n ('vect', CountVectorizer(tokenizer=partial(tokenize))),\n ('tfidf', TfidfTransformer())\n ]), message_col),\n\n ('starting_verb', StartingVerbExtractor(), message_col),\n\n ('category_terms', CategoryTermExtractor(category_names=category_names),\n message_col),\n\n ], remainder='drop')),\n\n # specify the estimator\n ('clf', LabelPowerset(MultinomialNB(fit_prior=True)))\n ])\n\n # parameter grid to be used for grid search\n parameters = {\n 'features__text_pipeline__vect__max_features': [10000],\n 'features__text_pipeline__tfidf__sublinear_tf': [True],\n 'features__text_pipeline__vect__ngram_range': [(1,1), (1,2)],\n 'features__text_pipeline__vect__min_df': [1],\n 'features__text_pipeline__vect__max_df': [.95],\n 'features__text_pipeline__tfidf__smooth_idf': [True],\n 'features__text_pipeline__tfidf__norm': ['l2'],\n 'clf__classifier__alpha': [0.01, 1.]\n }\n\n # perform cross validation using grid search on the pipeline described above\n cv = GridSearchCV(pipeline, param_grid=parameters, cv=5, verbose=2)\n return cv\n except:\n raise Exception(\"Could not build model.\")\n #finally:\n # return cv", "def __init__(self,training_data):\n my_data = genfromtxt(training_data, delimiter='\\t',skip_header=0)\n n_col = my_data.shape[1]\n n_features=n_col-1 #assuming that the latest column\n #contains the the outputs \n #preprocessing data\n X = preprocessing.scale(np.hsplit(my_data,[n_features,n_col])[0])\n Y = np.squeeze(np.asarray(np.hsplit(my_data,[n_features,n_col])[1]))\n #defining scaling\n self.scaler = preprocessing.Scaler()\n self.scaler.fit(np.hsplit(my_data,[n_features,n_col])[0])\n #define classifier\n self.classifier = svm.LinearSVC(class_weight='auto',C=1.0)\n self.classifier.fit(X, Y)" ]
[ "0.6835612", "0.6641352", "0.66403395", "0.6555825", "0.6487551", "0.6304003", "0.6169526", "0.61496544", "0.6141949", "0.61404395", "0.6100387", "0.6087156", "0.606636", "0.6064309", "0.60637945", "0.60066015", "0.5956803", "0.5955455", "0.59401214", "0.592966", "0.5922185", "0.59108245", "0.5902111", "0.58995694", "0.5880082", "0.5856507", "0.584686", "0.5829975", "0.581364", "0.5793886", "0.5792399", "0.57829046", "0.5779052", "0.577409", "0.57655025", "0.5759597", "0.5746837", "0.574068", "0.57279783", "0.5725385", "0.5714474", "0.57091284", "0.57038", "0.5703311", "0.56978613", "0.56922024", "0.5691449", "0.5691", "0.5688698", "0.56818086", "0.56810397", "0.56808716", "0.56733805", "0.5664954", "0.5652718", "0.565055", "0.56444824", "0.5633117", "0.5629669", "0.56234527", "0.56233805", "0.56182355", "0.56134343", "0.56103534", "0.5598798", "0.55778664", "0.5569863", "0.55641973", "0.5559254", "0.5559254", "0.5556268", "0.5554074", "0.5550835", "0.55412596", "0.5536855", "0.5527457", "0.5518826", "0.5509024", "0.55076617", "0.5504445", "0.5501656", "0.548846", "0.5487628", "0.5477072", "0.5475356", "0.54750705", "0.5474882", "0.5465698", "0.54626673", "0.5461463", "0.5456123", "0.54556394", "0.5453607", "0.5452587", "0.54472184", "0.5443178", "0.5440296", "0.5439759", "0.54313385", "0.54309106", "0.5430906" ]
0.0
-1
Generate a list of ranks that get harder to obtain as they approach the maximum
def generate_ranks(maximum: int, steps: int) -> List[int]: ranks = [] for i in range(steps): ranks += [maximum] maximum = int(maximum * 0.75) RANK_CUTOFFS = list(reversed(ranks)) return RANK_CUTOFFS
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def findRanks(toBeRanked, values):\n\treturn list(map(lambda e: findRank(e, values), toBeRanked))", "def resolveTie(self, hand_ranking, tie_list):\n max_rank_list = [] \n\n for i in range(5):\n \"\"\" Lowest rank card as baseline \"\"\"\n curr_max_rank = 0 \n for player in tie_list:\n if hand_ranking.player_best_hand_dict[player.name][i].rank > curr_max_rank:\n curr_max_rank = hand_ranking.player_best_hand_dict[player.name][i].rank\n max_rank_list.append(curr_max_rank)\n\n \"\"\" Compare player hands to max_rank_list \"\"\"\n \"\"\" Start with final card and loop towards lowest rank \"\"\"\n for i in range(5-1, -1, -1):\n for player in tie_list:\n if hand_ranking.player_best_hand_dict[player.name][i].rank < max_rank_list[i] and len(tie_list) > 1:\n tie_list.remove(player)\n return tie_list", "def get_rank_probabilities(n: int) -> List[float]:\n alpha = 3.5\n ranks = [1 / i**alpha for i in range(1, n + 1)]\n\n return [r / sum(ranks) for r in ranks]", "def get_ranks(d): \n raise NotImplementedError(\"Problem 3 Incomplete\")", "def findRelativeRanks(self, nums: List[int]) -> List[str]:\n scores = sorted(nums, reverse=True)\n rewards = {}\n for i, score in enumerate(scores):\n if i == 0:\n reward = 'Gold Medal'\n elif i == 1:\n reward = 'Silver Medal'\n elif i == 2:\n reward = 'Bronze Medal'\n else:\n reward = str(i + 1)\n rewards[score] = reward\n return [rewards[score] for score in nums]", "def get_ranks(w_vector):\n tmp = np.flip(w_vector.argsort())\n ranks = np.empty_like(tmp)\n ranks[tmp] = np.arange(len(w_vector))\n return ranks", "def _rank(measure):\n sort_idx = np.argsort(-measure)\n ranks = np.empty(len(measure), int)\n ranks[sort_idx] = np.arange(1, len(measure)+1)\n return ranks", "def recommendation_ranking(self):\n iu = self.final_recommendation_score_matrix()\n new_iu = []\n for row in iu:\n li = []\n temp = row\n if self.product != \"dist\":\n temp = -np.sort(-temp)\n for element in row:\n li.append(binary_search_opp(temp,element)+1) \n else:\n temp = np.sort(temp)\n for element in row:\n li.append(np.searchsorted(temp,element)+1)\n new_iu.append(li)\n return np.array(new_iu)", "def ring_winners(b, players):\n winners = []\n winrank = ''\n s = [evaluator.evaluate(b, p) for p in players]\n for i, rank in enumerate(s):\n if rank == min(s):\n winners.append(i)\n winrank = evaluator.class_to_string(evaluator.get_rank_class(rank))\n return [winners, winrank]", "def __rank__(self) -> int:", "def climbingLeaderboard(scores, alice):\n unique_scores = list({score: None for score in scores}.keys())[::-1]\n ranks = []\n # last_score_index = 0\n for game_score in alice:\n for i, score in enumerate(unique_scores):\n if score > game_score:\n ranks.append(len(unique_scores) - i + 1)\n break\n elif score == game_score:\n ranks.append(len(unique_scores) - i)\n break\n elif i == len(unique_scores) - 1:\n ranks.append(1)\n else:\n continue\n\n return ranks", "def card_ranks(hand):\n ranks = ['--23456789TJQKA'.index(r) for r, s in hand]\n ranks.sort(reverse = True)\n return [5, 4, 3, 2, 1] if (ranks == [14, 5, 4, 3, 2]) else ranks", "def _rank(self, ranking, n):\n return nlargest(n, ranking, key=ranking.get)", "def gen_rank(names, merits):\n if len(names) > 1:\n winner = random_choice(range(len(names)), merits)\n winner_name = names.pop(winner)\n re_scale_factor = 1.0/(1.0-merits.pop(winner))\n merits = [y*re_scale_factor for y in merits]\n return [winner_name] + gen_rank(names, merits)\n else: \n return [names[0]]", "def _compute_relative_leaderboard_indexes(ranking, size):\n if ranking == 0 
or ranking == 1:\n return (0, 5)\n elif ranking == size or ranking == size-1:\n return (max(0, size-5), size)\n else:\n return (max(0, ranking-2), max(size, ranking+3))", "def rank_teams_of_curr_run(curr_score, curr_ranking):\n for place in curr_ranking:\n curr_place = get_key_with_max_value(curr_score)\n curr_ranking[place] = curr_ranking[place].__add__([curr_place])\n curr_score.pop(curr_place)\n return curr_ranking", "def get_rank(score):\n if score in range(0, 500):\n return RANKTYPES[0]\n elif score in range(500, 1500):\n return RANKTYPES[1]\n elif score in range(1500, 2000):\n return RANKTYPES[2]\n elif score in range(2000, 2500):\n return RANKTYPES[3]\n elif score in range(2500, 3000):\n return RANKTYPES[4]\n elif score in range(3000, 4000):\n return RANKTYPES[5]\n elif score in range(4000, 5500):\n return RANKTYPES[6]\n elif score > 5500:\n return RANKTYPES[7]", "def _rank(self):\r\n return sorted(self.player_points.items(),key=lambda x:x[1],reverse=True)", "def questionScores():\n rank = [1,2,3,4]\n scores = \"\"\n for x in range(4):\n rand = random.randint(1, 4)\n while rank[rand-1] == 0:\n rand = random.randint(1,4)\n scores += str(rank[rand-1])\n rank[rand-1] = 0\n return scores", "def rank():\n return 0", "def card_ranks(cards):\n ranks = [\"--23456789TJQKA\".index(r) for r,s in cards] # Each card contains a rank and a suit, hand/cards == [(11, 'Q'), (9, 'D')] \n # Using a \"Rank Strings Array\" (i.e using an array to represent the rank strings) to index it for the ranks\n ranks.sort(reverse=True)\n return [5, 4, 3, 2, 1] if (ranks == [14, 5, 3, 2, 1]) else ranks", "def hand_rank(hand):\n ranks = card_ranks(hand) # ranks is a list of all the ranks. A sorted list of ranks is returned\n if straight(hand) and flush(hand): # Straight flush\n return (8, max(ranks)) # 2 3 4 5 6 (8, 6) 6 7 8 9 T (8, 10)\n elif kind(4, ranks): # Here kind(4, ranks) is used to return a bolean value\n # kind(4, ranks) returns the int when true, returns false if not true (used as boolean)\n return (7, kind(4, ranks), kind(1, ranks)) # 9 9 9 9 3 (7, 9, 3) 9 9 9 9 5 (7, 9, 5)\n elif kind(3, ranks) and kind(2, ranks): # full house\n return (6, kind(3, ranks), kind(2, ranks))\n elif flush(hand): # flush\n return (5, ranks)\n elif straight(ranks): # straight\n return (4, max(ranks))\n elif kind(3, ranks): # 3 of a kind\n return (3, kind(3, ranks), ranks)\n elif two_pair(ranks): # 2 pair\n return (2, two_pair(ranks), ranks)\n elif kind(2, ranks): # kind\n return (1, kind(2, ranks), ranks)\n else: # high card\n return (0, ranks)", "def findRelativeRanks(nums):\n compare_lst = copy.deepcopy(nums)\n compare_lst.sort(reverse=True)\n for i in nums:\n compare_index = compare_lst.index(i)\n nums_index = nums.index(i)\n if compare_index > 2:\n nums[nums_index] = str(compare_index + 1)\n elif compare_index == 0:\n nums[nums_index] = 'Gold Medal'\n elif compare_index == 1:\n nums[nums_index] = 'Silver Medal'\n else:\n nums[nums_index] = 'Bronze Medal'\n return nums", "def hand_rank(hand):\n ranks = card_ranks(hand)\n if straight(ranks) and flush(hand):\n return (8, max(ranks))\n elif kind(4, ranks):\n return (7, kind(4, ranks), kind(1, ranks))\n elif kind(3, ranks) and kind(2, ranks):\n return (6, kind(3, ranks), kind(2, ranks))\n elif flush(hand):\n return (5, ranks)\n elif straight(ranks):\n return (4, max(ranks))\n elif kind(3, ranks):\n return (3, kind(3, ranks), ranks)\n elif two_pair(ranks):\n return (2, two_pair(ranks), ranks)\n elif kind(2, ranks):\n return (1, kind(2, ranks), ranks)\n else:\n return (0, 
ranks)", "def assignRanks(self):\r\n\t\trank = 0\r\n\t\tscores = list(self._playerScores)\r\n\t\tscores.reverse()\r\n\t\tfor playerScore in scores:\r\n\t\t\tif not playerScore.has(NOT_MET) or not playerScore.value(NOT_MET):\r\n\t\t\t\trank += 1\r\n\t\t\t\tplayerScore.set(RANK, smallText(BugUtil.colorText(u\"%d\" % rank, ScoreOpt.getRankColor())))\r\n\t\tif rank > 0:\r\n\t\t\tself._anyHas[RANK] = True", "def climbingLeaderboard(scores, alice):\n\n # unique scores\n scores = sorted(list(set(scores))) # asc\n player_ranks = []\n idx = 0\n n = len(scores)\n\n for alice_score in alice: # alice in asc order\n \n # Find the rank. For next alice score (which is not smaller), continue from the same index\n while (n > idx and alice_score >= scores[idx]):\n idx += 1\n\n player_ranks.append(n+1-idx)\n\n return player_ranks", "def ranks_from_scores(scores, rank_gap=1e-15):\n prev_score = None\n rank = 0\n for i, (key, score) in enumerate(scores):\n try:\n if abs(score - prev_score) > rank_gap:\n rank = i\n except TypeError:\n pass\n\n yield key, rank\n prev_score = score", "def abilityScores():\n\n scores_list = []\n\n for i in range(6):\n temp_list = []\n for j in range(4):\n temp_list.append(r.choice([1,2,3,4,5,6]))\n temp_list.sort()\n scores_list.append(temp_list[1]+temp_list[2]+temp_list[3])\n scores_list.sort()\n return scores_list", "def get_all_rankings(session: CondorSession) -> List[sc.Ranking]:\n return [sc.Ranking(matrix) for matrix in RankingMatrix.list(session)]", "def recommend_from_scores(scores: List[List[float]], n: int) -> List[List[int]]:\n\n def top_idx(scores):\n return np.array(scores).argsort()[::-1][:n]\n\n return [top_idx(s) for s in scores]", "def _rank(self, ranking, n):\n return nlargest(n, ranking, key=ranking.get)", "def _sub_ranker_RN(self,numbers_frequency):\n rank_dictionary={'42':3,'32':4,'33':7,'23':8,'24':9,'15':10}\n\n #in this subspace where sequences(of 5 cards) and repetead suits (of 5 cards) are not posible\n #there is a relation between , max frequency and number of different numbers (freq>0) with the rank\n\n case=str(max(numbers_frequency))+str(len(numbers_frequency.loc[numbers_frequency>0]))\n\n return rank_dictionary[case]", "def ranks(cls):\n ranked = []\n for team in sorted(dbsession.query(cls).order_by(desc(cls.money)).all()):\n if not team.locked:\n ranked.append(team)\n return ranked", "def straight(ranks):\n return max(ranks) - min(ranks) == 4 and len(set(ranks)) == 5", "def ranking(orig_data):\n data = np.copy(orig_data)\n values = np.sort(data)\n rank = np.zeros(data.shape)\n r = 0\n for i in range(values.shape[0]):\n for j in range(data.shape[0]):\n if data[j] == values[i]:\n rank[j] = r\n data[j] = 9223372036854775807 # MaxInt\n break\n if i < values.shape[0]-1 and values[i] < values[i+1]:\n r = i + 1\n return rank", "def _update_ranks(angles, min_angle, max_angle):\n return angles / tf.cast(\n tf.reduce_max(tf.abs(tf.stack([min_angle, max_angle]))), tf.float64\n )", "def two_pair(ranks):\n result = [r for r in set(ranks) if ranks.count(r) == 2]\n if len(result) == 2:\n return (max(result), min(result))", "def mostVisited(self, n: int, rounds):\n start, end = rounds[0], rounds[-1]\n if start <= end:\n return list(range(start, end+1))\n else:\n return list(range(1, end+1)) + list(range(start, n+1))", "def rank_more_than_five_cards(cards):\n combinations_of_five = list(combinations(cards, 5))\n return max([rank_five_cards(cards) for cards in combinations_of_five])", "def rerank_candidates(s, pred2sub_rank, all_predictions, rerank_top=20):\n 
predicted_smiles = []\n model_input = []\n for (predict_smi, label), _ in Counter(all_predictions).most_common(rerank_top):\n if predict_smi == s:\n continue\n features = get_all_features(\n get_all_ranking_info(pred2sub_rank[predict_smi]))\n predicted_smiles.append((predict_smi, label))\n model_input.append(features)\n\n model = RankingModel()\n model.load_state_dict(torch.load('./models/ranker/rank_model.pt', map_location='cpu'))\n model.eval()\n\n test_loader = DataLoader(RankingTestDataset(\n model_input), batch_size=1000, shuffle=False, num_workers=2)\n ranking_scores = []\n for data in test_loader:\n outputs = model(data)[0]\n ranking_scores.extend(outputs.detach().cpu().numpy())\n\n assert len(predicted_smiles) == len(ranking_scores)\n pred_smi2score = {k: v[1]\n for k, v in zip(predicted_smiles, ranking_scores)}\n return pred_smi2score", "def get_n_best(self):\n pass", "def set_rank_order(self):\n for k in self._run:\n self._run[k].sort(key=lambda x:x.get_rank(),reverse=False)\n tot_res = len(self._run[k])\n for r in self._run[k]:\n r.set_score(tot_res - int(r.get_rank()) + 1)\n print r.get_str()", "def rank(self):\n rank = 0\n rho = self.array_form[:]\n n = self.size - 1\n size = n + 1\n psize = int(ifac(n))\n for j in xrange(size - 1):\n rank += rho[j]*psize\n for i in xrange(j + 1, size):\n if rho[i] > rho[j]:\n rho[i] -= 1\n psize //= n\n n -= 1\n return rank", "def rank(players, community):\n\t# Structure that holds the player ranking results\n\tclass RankResults():\n\t\tdef __init__(self):\n\t\t\tself.winner = [] # (WIN, player_name) or (TIE, [player1, player2, ...]) \n\t\t\tself.bestHands = [] # [(pl_name, bestHand, handRank), ... ]\n\t\t\tself.kicker = [] # If player hands' ranks tie but lose\n\t\t\t\t\t# by kicker, this will have one card\n\t\t\t\n\t\n\t\tdef __repr__(self):\n\t\t\tif self.winner[0] == \"Win\":\n\t\t\t\twinPlayerIndex = [player[0] for player in \\\n\t\t\t\t\t\t\tself.bestHands].index(self.winner[1])\n\t\t\telse:\n\t\t\t\twinPlayerIndex = [player[0] for player in \\\n\t\t\t\t\t\t\tself.bestHands].index(self.winner[1][0])\n\t\t\twinningRank = self.bestHands[winPlayerIndex][2]\n\t\t\t\n\t\t\t# Returns Win/Tie, player name, and winning rank\n\t \treturn str(self.winner) + \" rank = \" + str(winningRank) + \" kicker = \" \\\n\t\t\t\t+ str(self.kicker)\n\n\t### Rank function definition starts here\n\n\t# Dictionary for converting card strings to numbers\n\tcardNums = {\"A\":14, \"K\":13, \"Q\":12, \"J\":11, \"10\":10, \"9\":9, \"8\":8, \\\n\t\t\t\"7\":7, \"6\":6, \"5\":5, \"4\":4, \"3\":3, \"2\":2}\n\n\t# scan each player's hand and return their best hand\n\twinHands = []\n\tresult = RankResults()\n\tfor player in players:\n\t\tcards = player.hand + community\n\t\t(playerHand, handStrength) = best_hand(cards)\n\t\tif len(winHands) != 0:\n\t\t\t# compare current player's hand to other\n\t\t\t# players in the best hands list\n\t\t\tif handStrength > winHands[0][2]:\n\t\t\t\twinHands = [(player.name, playerHand, handStrength)]\n\t\t\telif handStrength == winHands[0][2]:\n\t\t\t\twinHands.append( (player.name, playerHand, handStrength) )\n\t\t# if first player in list, \n\t\t# create a new list with this player's hand\t\t\t\n\t\telse: \n\t\t\twinHands = [(player.name, playerHand, handStrength)]\n\t\t\t\t\t\n\n\t\t# insert each player's hand into results\n\t\tresult.bestHands.append( (player.name, playerHand, handStrength) )\n\n\t# compare results. 
\n\t# winHands = ((name, handStrength, hand), ...)\n\tif len(winHands) == 1:\n\t\tresult.winner = (\"Win\", winHands[0][0])\n\telse:\n\t\t# tuple the i cards of every player to facilitate\n\t\t# comparison\n\t\tzippedHands = zip(*[winner[1] for winner in winHands])\n\t\t\n\t\t# Compare top 5 cards of tied winners\n\t\tfor i in range(5):\n\t\t\ttopCards = zippedHands[i]\n\t\t\tlargestCard = max(topCards) # find largest card \n\t\t\tisPlayerRemoved = False # loser detection flag\n\t\t\tnewWinHands = []\n\t\t\tfor j in range(len(topCards)):\n\t\t\t\tif topCards[j] == largestCard:\n\t\t\t\t\tnewWinHands.append(winHands[j]) \n\t\t\t\telse:\n\t\t\t\t\t# Remove players with < max\n\t\t\t\t\tisPlayerRemoved = True\n\t\t\t\t\t#winHands.remove(winHands.index(j))\n\t\t\t\t\t\n\t\t\twinHands = newWinHands\n\t\t\t# If only one winner remaining, stop checking\n\t\t\tif len(winHands) == 1:\n\t\t\t\tresult.kicker = largestCard\n\t\t\t\tresult.winner = (\"Win\", winHands[0][0])\t\t\n\t\t\t\tprint \"best hands = \" + str(result.bestHands)\n\t\t\t\treturn result\t\n\t\t\t# If player was removed, remake zippedHands\n\t\t\tif isPlayerRemoved:\n\t\t\t\tzippedHands = zip(*[winner[1] for winner in winHands])\n\t\t\t\t\t\n\t\t\n\t\tresult.winner = (\"Tie\", [winner[0] for winner in winHands])\n\t\n\tprint \"best hands = \" + str(result.bestHands)\n\n\treturn result", "def test_get_roles_rank(self):\n contrib_as = self.make_assignment(\n self.project, self.user_bob, self.role_contributor\n )\n guest_as = self.make_assignment(\n self.project, self.user_carol, self.role_guest\n )\n roles = self.project.get_roles()\n self.assertIn(contrib_as, roles)\n self.assertIn(guest_as, roles)\n roles = self.project.get_roles(max_rank=30)\n self.assertEqual(roles, [contrib_as, self.owner_as_cat])\n roles = self.project.get_roles(max_rank=40)\n self.assertEqual(roles, [contrib_as, guest_as, self.owner_as_cat])\n roles = self.project.get_roles(min_rank=30)\n self.assertEqual(roles, [contrib_as, guest_as])\n roles = self.project.get_roles(min_rank=40)\n self.assertEqual(roles, [guest_as])\n roles = self.project.get_roles(min_rank=30, max_rank=30)\n self.assertEqual(roles, [contrib_as])", "def get_active_ranks(self,fine=False):\n \n if fine:\n nqpt = self.nqpt_fine\n else:\n nqpt = self.nqpt\n \n #max_nqpt_per_worker = (self.nqpt // size\n # + min(self.nqpt % size, 1))\n #n_active_workers = (self.nqpt // max_nqpt_per_worker\n # + min(self.nqpt % max_nqpt_per_worker, 1))\n max_nqpt_per_worker = (nqpt // size\n + min(nqpt % size, 1))\n n_active_workers = (nqpt // max_nqpt_per_worker\n + min(nqpt % max_nqpt_per_worker, 1))\n return np.arange(n_active_workers)", "def get_triangle_numbers(max_score):\n l=[]\n n = 1\n t_n = triangular_number(n)\n while t_n <= max_score:\n l.append(t_n)\n n += 1\n t_n = triangular_number(n)\n return l", "def find_ranking(comparisons, equal_width=0.2, max_rank=-1, verbose=False):\n # remove unnecessary variables\n comparisons = {(i, j) if i < j else (j, i): value if i < j else 1 - value\n for (i, j), value in comparisons.items()}\n nodes = np.unique(\n [i for ij in comparisons.keys() for i in ij])\n\n # define variables\n model = Model('comparison')\n model.setParam('OutputFlag', verbose)\n values = np.fromiter(comparisons.values(), dtype=float)\n assert values.max() <= 1 and values.min() >= 0\n # variables to encode the error of comparisons\n E_ij = model.addVars(comparisons.keys(), name='e_ij', vtype=GRB.CONTINUOUS,\n ub=1.0-values, lb=-values)\n # variables to encode hard choice of >=, <=, ==\n Ge_ij = 
model.addVars(comparisons.keys(), name='ge_ij', vtype=GRB.BINARY)\n Le_ij = model.addVars(comparisons.keys(), name='le_ij', vtype=GRB.BINARY)\n Eq_ij = model.addVars(comparisons.keys(), name='eq_ij', vtype=GRB.BINARY)\n # variables to help with transitivity in non-fully connected graphs\n if max_rank < 1:\n max_rank = len(nodes)\n R_i = model.addVars(nodes, name='r_i', vtype=GRB.CONTINUOUS, lb=0,\n ub=max_rank)\n # variables to emulate abs\n T_ij_pos = {}\n T_ij_neg = {}\n index = (values != 1) & (values != 0)\n T_ij_pos = model.addVars(\n (ij for ij, value in comparisons.items() if value not in [0.0, 1.0]),\n vtype=GRB.CONTINUOUS, name='T_ij_pos', lb=0, ub=1-values[index])\n T_ij_neg = model.addVars(\n (ij for ij, value in comparisons.items() if value not in [0.0, 1.0]),\n vtype=GRB.CONTINUOUS, name='T_ij_neg', lb=0, ub=values[index])\n model.update()\n\n # emulate abs for non-binary comparisons: E_ij = T_ij_pos - T_ij_neg\n model.addConstrs(\n (E_ij[ij] == T_ij_pos[ij] - T_ij_neg[ij] for ij in T_ij_pos),\n 'E_ij = T_ij_pos - T_ij_neg')\n\n # hard decision of >=, <=, and ==\n lower_bound = 0.5 - equal_width / 2.0\n upper_bound = 0.5 + equal_width / 2.0\n # <=\n model.addConstrs(\n (E_ij[ij] + comparisons[ij] - upper_bound <= ge_ij\n for ij, ge_ij in Ge_ij.items()), 'ge_ij_lower_bound')\n model.addConstrs(\n (E_ij[ij] + comparisons[ij] - upper_bound >= -1 + ge_ij\n for ij, ge_ij in Ge_ij.items()), 'ge_ij_upper_bound')\n # >=\n model.addConstrs(\n (E_ij[ij] + comparisons[ij] - lower_bound >= -le_ij\n for ij, le_ij in Le_ij.items()), 'le_ij_lower_bound')\n model.addConstrs(\n (E_ij[ij] + comparisons[ij] - lower_bound <= 1 - le_ij\n for ij, le_ij in Le_ij.items()), 'le_ij_upper_bound')\n # ==\n model.addConstrs(\n (le + eq + ge == 1 for le, eq, ge in zip(\n Le_ij.values(), Eq_ij.values(), Ge_ij.values())), 'eq_ij')\n\n # transitivity\n for (i, j), eq_a in Eq_ij.items():\n le_a = Le_ij[i, j]\n ge_a = Ge_ij[i, j]\n for k in nodes:\n j_, k_ = j, k\n if j > k:\n j_, k_ = k, j\n eq_b = Eq_ij.get((j_, k_), None)\n if eq_b is None:\n continue\n else:\n le_b = Le_ij[j_, k_]\n ge_b = Ge_ij[j_, k_]\n if j_ != j:\n le_b, ge_b = ge_b, le_b\n\n i_, k_ = i, k\n if i > k:\n i_, k_ = k, i\n eq_c = Eq_ij.get((i_, k_), None)\n if eq_c is None:\n continue\n else:\n le_c = Le_ij[i_, k_]\n ge_c = Ge_ij[i_, k_]\n if i_ != i:\n le_c, ge_c = ge_c, le_c\n\n # a <= b and b <= c -> a <= c\n model.addLConstr(\n ge_a + ge_b, GRB.LESS_EQUAL, 1 + ge_c,\n f'transitivity_ge_{i},{j},{k}')\n # a >= b and b >= c -> a >= c\n model.addLConstr(\n le_a + le_b, GRB.LESS_EQUAL, 1 + le_c,\n f'transitivity_le_{i},{j},{k}')\n # a <= b and b == c -> a <= c\n model.addLConstr(\n le_a + eq_b, GRB.LESS_EQUAL, 1 + le_c,\n f'transitivity_leeq_{i},{j},{k}')\n # a == b and b <= c -> a <= c\n model.addLConstr(\n eq_a + le_b, GRB.LESS_EQUAL, 1 + le_c,\n f'transitivity_eqle_{i},{j},{k}')\n # a >= b and b == c --> a >= c\n model.addLConstr(\n ge_a + eq_b, GRB.LESS_EQUAL, 1 + ge_c,\n f'transitivity_geeq_{i},{j},{k}')\n # a == b and b >= c --> a >= c\n model.addLConstr(\n eq_a + ge_b, GRB.LESS_EQUAL, 1 + ge_c,\n f'transitivity_eqge_{i},{j},{k}')\n # a == b and b == c --> a == c\n model.addLConstr(\n eq_a + eq_b, GRB.LESS_EQUAL, 1 + eq_c,\n f'transitivity_eq_{i},{j},{k}')\n\n # transitivity helper (for not-fully connected graphs)\n # also provides a latent rank\n big_m = max_rank\n model.addConstrs(\n ((1 - ge_ij) * big_m + R_i[i] >= R_i[j] + 1 for (i, j), ge_ij in Ge_ij.items()),\n 'rank_transitivity_larger')\n model.addConstrs(\n ((1 - le_ij) * 
big_m + R_i[j] >= R_i[i] + 1 for (i, j), le_ij in Le_ij.items()),\n 'rank_transitivity_smaller')\n model.addConstrs(\n ((1 - eq_ij) * big_m + R_i[j] >= R_i[i] for (i, j), eq_ij in Eq_ij.items()),\n 'rank_transitivity_equal1')\n model.addConstrs(\n ((1 - eq_ij) * big_m + R_i[i] >= R_i[j] for (i, j), eq_ij in Eq_ij.items()),\n 'rank_transitivity_equal2')\n\n # objective function\n objective = LinExpr()\n for ij, value in comparisons.items():\n if value == 1.0:\n objective += -E_ij[ij]\n elif value == 0.0:\n objective += E_ij[ij]\n else:\n objective += T_ij_pos[ij] + T_ij_neg[ij]\n model.setObjective(objective, GRB.MINIMIZE)\n\n # solve\n model.optimize()\n\n # verify abs emulation: one T_ij has to be 0\n for ij, value in T_ij_pos.items():\n assert value.X == 0 or T_ij_neg[ij] == 0, \\\n f'T_{ij} pos {value.X} neg {T_ij_neg[ij]}'\n\n # find minimal Rs\n model_ = Model('comparison')\n model_.setParam('OutputFlag', verbose)\n R_i = model_.addVars(nodes, name='r_i', vtype=GRB.CONTINUOUS, lb=0,\n ub=len(nodes))\n for ((i, j), ge_ij), le_ij in zip(Ge_ij.items(), Le_ij.values()):\n if ge_ij.x == 1:\n model_.addConstr(R_i[i] >= R_i[j] + 1)\n elif le_ij.x == 1:\n model_.addConstr(R_i[j] >= R_i[i] + 1)\n else:\n model_.addConstr(R_i[j] == R_i[i])\n model_.setObjective(R_i.sum(), GRB.MINIMIZE)\n model_.optimize()\n\n return [model_.getVarByName(f'r_i[{i}]').X for i in range(len(nodes))], \\\n model.objVal", "def personal_best(scores: list) -> int:\n return max(scores)", "def straight(ranks):\n return (max(ranks)-min(ranks) == 4) and len(set(ranks)) == 5", "def atiecorrect(rankvals):\r\n sorted,posn = ashellsort(N.array(rankvals))\r\n n = len(sorted)\r\n T = 0.0\r\n i = 0\r\n while (i<n-1):\r\n if sorted[i] == sorted[i+1]:\r\n nties = 1\r\n while (i<n-1) and (sorted[i] == sorted[i+1]):\r\n nties = nties +1\r\n i = i +1\r\n T = T + nties**3 - nties\r\n i = i+1\r\n T = T / float(n**3-n)\r\n return 1.0 - T", "def _compute_ranks(logits, is_valid):\n _check_tensor_shapes([logits, is_valid])\n # Only sort entries with is_valid = True.\n scores = tf.compat.v1.where(\n is_valid, logits, -1e-6 * tf.ones_like(logits) +\n tf.reduce_min(input_tensor=logits, axis=1, keepdims=True))\n return utils.sorted_ranks(scores)", "def _compute_rank(self):\n# print(Card((self.ranks[0]),self.suits[0]))\n# print(Card((self.ranks[1]),self.suits[1]))\n# print(Card((self.ranks[2]),self.suits[2]))\n# print(Card.ranks[self.ranks[0]])\n# #print(Card.ranks[self.ranks[0]+1])\n# print(self.ranks[1])\n# print(Card.suits[self.suits[1]])\n a = ['Ace','2','3']\n newlist =[self.ranks[0],self.ranks[1],self.ranks[2]]\n newlist = sorted(newlist)\n if(Card.suits[self.suits[0]] == Card.suits[self.suits[1]] == Card.suits[self.suits[2]]):\n #a = ['Ace','2','3']\n if(Card.ranks[self.ranks[0]] in a) and (Card.ranks[self.ranks[1]] in a) and (Card.ranks[self.ranks[2]] in a):\n self.rank=5\n else:\n if(newlist[1] - newlist[0]) == 1 and (newlist[2]-newlist[1])==1:\n #StraightFlush\n self.rank=5\n else:\n #Flush\n self.rank=2\n \n #Threeofakind\n elif (Card.ranks[self.ranks[0]] == Card.ranks[self.ranks[1]] == Card.ranks[self.ranks[2]]):\n self.rank=4\n #Pair\n elif(Card.ranks[self.ranks[0]]==Card.ranks[self.ranks[1]] or Card.ranks[self.ranks[0]]==Card.ranks[self.ranks[2]] or Card.ranks[self.ranks[1]]==Card.ranks[self.ranks[2]] or Card.ranks[self.ranks[2]]==Card.ranks[self.ranks[1]]):\n self.rank=1 \n #Straight\n elif(((newlist[1] - newlist[0]) == 1) and (newlist[2]-newlist[1])==1):\n self.rank=3\n \n elif((Card.ranks[self.ranks[0]] in a) and 
(Card.ranks[self.ranks[1]] in a) and (Card.ranks[self.ranks[2]] in a)):\n if(Card.ranks[self.ranks[0]] != Card.ranks[self.ranks[1]] != Card.ranks[self.ranks[2]]):\n #if((Card.ranks[self.ranks[0]] != Card.ranks[self.ranks[1]]) and (Card.ranks[self.ranks[0]]!= Card.ranks[self.ranks[2]])and (Card.ranks[self.ranks[1]]!= Card.ranks[self.ranks[2]])):\n self.rank=3\n\n else:\n self.rank=0\n #pass", "def rank_transform(self):\n sorted_targets = sorted(self.genomes, key=lambda item: item.fitness)\n for index, target in enumerate(sorted_targets):\n target.fitness = index/len(sorted_targets) - 0.5", "def check_hand_rank(hand):\n card_rank = ['--23456789TJQKA'.index(n) for n,h in hand]\n card_rank.sort()\n card_rank.reverse()\n #for royal straight flush\n card_rank_rsf = ['HDSC'.index(h) for n,h in hand]\n card_rank_rsf.sort()\n card_rank_rsf.reverse()\n if card_rank == [14,5,4,3,2]:\n card_rank = [5,4,3,2,1]\n if royal_straight_flush(hand):\n return 9,card_rank_rsf[0]\n elif straight_flush(hand):\n return 8,max(card_rank)\n elif four_of_a_kind(hand):\n return 7,max(card_rank)\n elif full_house(hand):\n tong = 0\n kuu = 0\n s = [n for n,h in hand]\n for i in xrange(len(s)):\n if(s.count(s[i])==3):\n tong = s[i]\n else:\n kuu = s[i]\n return 6,int(tong),int(kuu)\n elif flush(hand):\n return 5,max(card_rank)\n elif straight(hand):\n return 4,max(card_rank)\n elif three_of_a_kind(hand):\n ld = 0\n a = 0\n for i in xrange(0,3):\n if card_rank.count(card_rank[i]) > 1 :\n ld = (card_rank[i])\n else:\n a = card_rank[i]\n return 3,ld,a\n elif two_pair(hand):\n ld = []\n a = 0\n for i in xrange(0,3):\n if card_rank.count(card_rank[i]) >=2:\n ld.append(card_rank[i])\n card_rank.pop(i)\n else:\n a = card_rank[i]\n ld.sort(reverse=True)\n return 2,ld[0],ld[1],a\n elif one_pair(hand):\n ld = 0\n a = []\n for i in xrange(len(card_rank)):\n if card_rank.count(card_rank[i]) > 1 :\n ld = (card_rank[i])\n else:\n a.append(card_rank[i])\n a.sort(reverse = True)\n return 1,ld,a[0],a[1],a[2]\n else:\n return 0,max(card_rank)", "def get_players_by_rank(self):\n return sorted(self.participants, key=lambda p: p.tournament_score, reverse=True)", "def spearman_rank_unique(X,Y,n):\n\n rank_X = get_rank(X, n)\n rank_Y = get_rank(Y, n)\n\n twos = [2]*n\n diff = map(exponent, map(subtract, rank_X, rank_Y), twos)\n\n return 1 - 6*float(sum(diff))/(n*(n**2 - 1))", "def scan_cards(player, river):\r\n best_rank = 0\r\n cards = player.hand + river\r\n hands = combinations(cards, 5) # find all 5 card hands\r\n best_hands = []\r\n for h in hands:\r\n flat = list(sum(h, ()))\r\n prep = np.zeros(shape=(10,))\r\n j = 0\r\n for i in flat:\r\n prep[j] = i\r\n j = j+1\r\n input = np.zeros(shape=(1,10))\r\n input[0] = prep\r\n rank = np.argmax(player.ai.predict(input)[0])\r\n\r\n if rank == best_rank:\r\n best_hands.append(h)\r\n if rank > best_rank:\r\n best_rank = rank\r\n best_hands = []\r\n best_hands.append(h)\r\n final_hand = best_hand(best_hands)\r\n return (best_rank, final_hand)", "def get_rank(weight):\n weight = min(1.0, max(weight, 0.0))\n ranks = [x for x in ALL_RANKS if weight >= x.min_weight]\n ranks.sort(key=lambda x: x.min_weight)\n return ranks.pop()", "def _compute_ranks(df, lower_better=True):\n # return df.rank(axis=1, numeric_only=True, ascending=lower_better)\n return df.rank(axis=1, numeric_only=True, ascending=lower_better, method='min')", "def _get_rank(self,fitness):\n # infact you can get the order or rank by only once sort.\n rank=fitness[:,0].argsort().argsort() # [n]\n return rank", "def competitionRanking(groups, 
setRank):\n rank = 1\n for k, g in groups:\n cnt = 0\n for item in g:\n setRank(item, rank)\n cnt += 1\n rank += cnt", "def findRank(e, values):\n\tcount = 1\n\tfor ve in values:\n\t\tif ve < e:\n\t\t\tcount += 1\n\treturn count", "def personal_best(scores):\n return max(scores)", "def rank(x: np.ndarray):\n assert x.ndim == 1\n ranks = np.empty(len(x), dtype=int)\n ranks[x.argsort()] = np.arange(len(x))\n return ranks", "def get_rank(points: int, cutoffs: List[int]) -> int:\n rank = 0\n for i, cutoff in enumerate(cutoffs):\n if points < cutoff:\n if i == 0:\n break\n else:\n rank = i - 1\n break\n else:\n rank = RANK_COUNT - 1\n\n return rank", "def ltiecorrect(rankvals):\r\n sorted,posn = shellsort(rankvals)\r\n n = len(sorted)\r\n T = 0.0\r\n i = 0\r\n while (i<n-1):\r\n if sorted[i] == sorted[i+1]:\r\n nties = 1\r\n while (i<n-1) and (sorted[i] == sorted[i+1]):\r\n nties = nties +1\r\n i = i +1\r\n T = T + nties**3 - nties\r\n i = i+1\r\n T = T / float(n**3-n)\r\n return 1.0 - T", "def workout_rank(a, rank):\r\n # Check if workout score is empty\r\n if pd.isnull(a):\r\n return np.nan\r\n else:\r\n return int(rank)", "def honest_rankings(utilities):\n n_cands = utilities.shape[1]\n\n # 255 candidates is plenty for real elections, so we'll limit it there and\n # use uint8 to save memory.\n if n_cands > 255:\n raise ValueError('Maximum number of candidates is 255')\n\n # Higher utilities for a voter are ranked first (earlier in row)\n return np.argsort(utilities)[:, ::-1].astype(np.uint8)", "def get_maximum_rank(score):\n\tscores = [0, 300, 450, 600, 750] # ranges 0-299, 300-449, etc.\n\trank = None\n\tfor i in range(len(scores)):\n\t\tif score >= scores[i]:\n\t\t\trank = i + 1\n\n\treturn rank", "def percenter(rank, max_rank):\n\treturn 100 * (rank/(max_rank or 1))", "def get_fb_ind_rankings(self):\n\n ranks = []\n self._logger.debug(\"Getting foosball individual rankings\")\n\n try:\n self.check_if_db_connected()\n cursor = self._db_conn.cursor()\n cursor.execute(\"SELECT player_id, first_name, last_name, \\\nnickname FROM player\")\n players = cursor.fetchall()\n\n for player_id, first_name, last_name, nickname in players:\n cursor.execute(\"SELECT fb_offense_rating, fb_defense_rating FROM \\\nplayer WHERE player_id = {0}\".format(player_id))\n offense_rating, defense_rating = cursor.fetchall()[0]\n\n cursor.execute(\"SELECT mu, sigma FROM rating WHERE rating_id \\\n= {0}\".format(offense_rating))\n mu, sigma = cursor.fetchall()[0]\n\n offense_rank = float(mu) - (3 * float(sigma))\n cursor.execute(\"SELECT mu, sigma FROM rating WHERE rating_id \\\n= {0}\".format(defense_rating))\n mu, sigma = cursor.fetchall()[0]\n\n defense_rank = float(mu) - (3 * float(sigma))\n\n cursor.execute(\"SELECT COUNT(result_id) FROM fb_result WHERE \\\noffense_winner = {0}\".format(player_id))\n offense_win_count = cursor.fetchone()[0]\n\n cursor.execute(\"SELECT COUNT(result_id) FROM fb_result WHERE \\\ndefense_winner = {0}\".format(player_id))\n defense_win_count = cursor.fetchone()[0]\n\n cursor.execute(\"SELECT COUNT(result_id) FROM fb_result WHERE \\\noffense_loser = {0}\".format(player_id))\n offense_lose_count = cursor.fetchone()[0]\n\n cursor.execute(\"SELECT COUNT(result_id) FROM fb_result WHERE \\\ndefense_loser = {0}\".format(player_id))\n defense_lose_count = cursor.fetchone()[0]\n\n intermediate_rank = (first_name, last_name, nickname,\n 'Offense', round(offense_rank, 4), offense_win_count,\n offense_lose_count)\n ranks.append(intermediate_rank)\n del intermediate_rank\n intermediate_rank = 
(first_name, last_name, nickname,\n 'Defense', round(defense_rank, 4), defense_win_count,\n defense_lose_count)\n ranks.append(intermediate_rank)\n del intermediate_rank\n\n except MySQLdb.OperationalError:\n self._logger.error(\"MySQL operational error occured\")\n traceback.print_exc()\n raise exceptions.DBConnectionError(\"Cannot connect to MySQL server\")\n\n except MySQLdb.ProgrammingError:\n self._logger.error(\"MySQL programming error\")\n traceback.print_exc()\n raise exceptions.DBSyntaxError(\"MySQL syntax error\")\n\n else:\n return ranks", "def _get_winning_indices(self, index_to_best_hands):\n winning_indices = []\n winning_hand = None\n for idx, best_hand in index_to_best_hands.iteritems():\n if not winning_indices:\n winning_indices.append(idx)\n winning_hand = best_hand\n continue\n if best_hand > winning_hand:\n winning_indices = [idx]\n winning_hand = best_hand\n elif best_hand == winning_hand:\n winning_indices.append(idx)\n return winning_indices", "def kind(n, ranks):\r\n for i in ranks:\r\n if n == ranks.count(i):\r\n return i", "def generate_good(self, m, n, rank, mu=2, ka=2):\n sr = random.random()\n s = []\n s.append(sr)\n for r in range(rank-1):\n newele = s[-1] * (1 + ka * random.random() / (rank-1))\n s.append(newele)\n s.reverse()\n \n # best_u = None\n # best_mu0 = 0\n # while best_mu0 == 0:\n # for _ in range(10):\n # A = np.random.rand(m,m)\n # A = scipy.linalg.orth(A)\n # u = A[:, :rank]\n # mu0 = self.compute_mu(u, m, rank)\n # print(\"mu0 : \", mu0)\n # if mu0 <= mu and mu0 >= best_mu0:\n # best_mu0 = mu0\n # best_u = u\n # print(\"mu0 for u:\", best_mu0)\n # # print(u.T @ u)\n \n # best_v = None\n # best_mu0 = 0\n # while best_mu0 == 0:\n # for _ in range(10):\n # B = np.random.rand(n,n)\n # B = scipy.linalg.orth(B)\n # v = B[:, :rank]\n # mu0 = self.compute_mu(v, n, rank)\n # print(\"mu0 : \", mu0)\n # if mu0 <= mu and mu0 >= best_mu0:\n # best_mu0 = mu0\n # best_v = v\n # print(\"mu0 for v:\", best_mu0)\n # u = best_u\n # v = best_v\n\n for _ in range(100):\n A = np.random.rand(m,m)\n A = scipy.linalg.orth(A)\n u = A[:, :rank]\n mu0 = self.compute_mu(u, m, rank)\n print(\"mu0 : \", mu0)\n if mu0 <= mu:\n break\n print(\"mu0 for u:\", mu0) \n\n for _ in range(10):\n B = np.random.rand(n,n)\n B = scipy.linalg.orth(B)\n v = B[:, :rank]\n mu0 = self.compute_mu(v, n, rank)\n print(\"mu0 : \", mu0)\n if mu0 <= mu:\n break\n print(\"mu0 for both:\", mu0)\n\n matrix = np.dot(u*s, v.T)\n \n kappa = s[0] / s[-1]\n print(\"kappa=\", kappa)\n \n ss = np.copy(s)\n for k in range(rank):\n ss[k] = s[k] / s[0]\n \n max_entry = np.max(np.abs(np.outer(u[:,:rank], v.T[:rank,:])))\n mu1 = max_entry * math.sqrt(m * n / rank)\n print(\"mu1=\", mu1)\n \n return matrix", "def get_scores(self):\n\n\t\tscores = np.dot(self.rankings, self.weights)\n\t\tranked_indices = np.argsort(scores)\n\t\tranked_sources = self.source_names[ranked_indices]\n\t\tranked_scores = sorted(scores)\n\t\tself.scores = {source:score for source, score in zip(ranked_sources, ranked_scores)}\n\n\t\treturn self.scores", "def page_rank_score(self, rows):\n pageranks = dict([(row[0], self.con.execute('select score from pagerank where urlid=%d'\n % row[0]).fetchone()[0]) for row in rows])\n maxrank = max(pageranks.values())\n normalizedscores = dict([(u, float(l) / maxrank) for (u, l) in pageranks.items()])\n return normalizedscores", "def multiple_ranks(our_data,start, end):\n count = start\n album_list = []\n while count <= end:\n album_list.append(find_by_rank(count))\n count += 1", "def 
getHighestRank_LowerWilson(self, higherBound = True):\n \n if len(self.Predictors) == 1:\n # there is only one predictor. choose that immediately\n predictor = self.Predictors[0]\n return (predictor, predictor.confidence)\n \n # grab the top 3 wins, top 3 wins-lost, top 3 confidences\n# maxWins = sorted(self.Predictors, key=lambda i: i.scoreWins)\n# maxDiff = sorted(self.Predictors, key=lambda i: i.scoreWins - i.scoreLosts)\n# maxConfidence = sorted(self.Predictors, key=lambda i: i.confidence)\n \n # grab the top predictors by wins, diffs and confidence.\n # on test, this has worse effect on ranking. (need more testing for confirmation)\n filteredPredictors = self.Predictors # no union\n\n # warning: set is non-deterministic\n #filteredPredictors = set(maxWins[:3]) | set(maxDiff[:3]) | set(maxConfidence[:3]) # union\n #filteredPredictors = set(maxWins[:5]) | set(maxDiff[:5]) | set(maxConfidence[:5]) # union\n #filteredPredictors = list(filteredPredictors)\n \n##############\n##todo: add treshold instead?\n#########\n \n predictorScores = []\n for i, predictor in enumerate(filteredPredictors):\n\n if useScoreBuffer == False:\n positiveRatings = predictor.scoreWins\n negativeRatings = predictor.scoreLosts\n totalRatings = predictor.totalTurns\n totalRatings = positiveRatings + negativeRatings\n else:\n positiveRatings = predictor.scoreBuffer.count(scoreBufferWin)\n negativeRatings = predictor.scoreBuffer.count(scoreBufferLost)\n totalRatings = len(predictor.scoreBuffer)\n totalRatings = positiveRatings + negativeRatings\n \n confidence = predictor.confidence\n \n # experiment: what happens if we use our score as confidence in self?\n \n# if confidence >= 1: # possible DNA\n# predictorScores.append((1.0, predictor))\n# continue\n \n if positiveRatings <= 0 or totalRatings <= 0:\n continue\n \n if 1:\n #confidence = 1 - confidence\n maxPredictionRating = 0.99 # possible DNA\n #maxPredictionRating = 1 # possible DNA\n \n if confidence > maxPredictionRating: confidence = maxPredictionRating\n if confidence < 0.0: confidence = 0.0\n\n ratings = rps.binconf(positiveRatings, negativeRatings, confidence)\n #ratings = binconf(positiveRatings, negativeRatings, confidence)\n \n if higherBound:\n rating = float(ratings[1])\n else:\n rating = float(ratings[0])\n \n #rating += (ratings[1] - ratings[0]) / 2\n \n if math.isnan(rating): rating = 0\n \n rating = round(rating,3) # fix for conversion from C float to Python float \n else:\n maxPredictionRating = 0.99 # possible DNA\n #maxPredictionRating = 1 # possible DNA\n if confidence > maxPredictionRating: confidence = maxPredictionRating\n if confidence < 0.0: confidence = 0.0\n \n #z = 1.96 # hardcorded for confidence=95%\n #z = 1.0 # 1.44=85% 1.96=95%\n p = 1 - 0.5 * (1 - confidence)\n z = cached_normcdfi(p)\n #z = rps.normcdfi(p)\n \n phat = float(positiveRatings) / totalRatings\n n = totalRatings\n \n rating = (phat + z*z/(2*n) - z * math.sqrt((phat*(1-phat)+z*z/(4*n))/n))/(1+z*z/n)\n \n #rating = round(rating, 3) # round to the nearest 3 decimals. experiment\n \n predictor.rankingConfidence = rating\n predictorScores.append((rating, predictor))\n\n if len(predictorScores) > 1:\n # filter out predictors that does not tie with the maximum rating, for optimization purposes\n maxRating = max(predictorScores, key=lambda i: i[0])[0]\n p = [p for p in predictorScores if p[0] == maxRating]\n\n if predictorScores[0] != maxRating:\n assert(\"Something is wrong. 
We filtered out predictions that is not the maximum but we got some here\") \n \n predictorScores = p\n elif len(predictorScores) == 1:\n rating, chosenPredictor = predictorScores[0]\n return chosenPredictor, rating\n else:\n random = rps.random() % len(filteredPredictors)\n chosenPredictor = filteredPredictors[random]\n rating = 0\n return chosenPredictor, rating\n \n # there are multiple predictors with the same rating.\n # let's choose the one with the biggest score (positive - negative)\n if useScoreBuffer == False:\n highestScorers = max(predictorScores, key=lambda i: i[1].scoreWins)\n else:\n highestScorers = max(predictorScores, key=lambda i: i[1].scoreBuffer.count(scoreBufferWin))\n predictorScores = [p for p in predictorScores if p[0] == highestScorers[0]]\n\n # tally the moves and choose the move with the most tally\n \n tally = [0, 0, 0]\n for p in predictorScores:\n # tally[p[1].moveLastTurn] += 1\n if p[1].moveLastTurn == 0: tally[0] += 1\n if p[1].moveLastTurn == 1: tally[1] += 1\n if p[1].moveLastTurn == 2: tally[2] += 1\n \n # let's choose a move at random between them \n # Filter predictorScores to only include the predictors with the maximum tally.\n maxTally = max(tally)\n talliedScorers = []\n if tally[0] == maxTally: \n rocks = [talliedScorers.append(p) for p in predictorScores if p[1].moveLastTurn == 0]\n if tally[1] == maxTally: \n papers = [talliedScorers.append(p) for p in predictorScores if p[1].moveLastTurn == 1]\n if tally[2] == maxTally: \n scissors = [talliedScorers.append(p) for p in predictorScores if p[1].moveLastTurn == 2] \n \n if len(talliedScorers) == 1:\n # in practice, this doesn't happen, but we put in this option to try to minimize bugs\n rating, chosenPredictor = talliedScorers[0]\n else:\n # play the move with the highest score\n finalChoice = None\n \n if tally[0] and tally[0] > tally[1] and tally[0] > tally[2]:\n Rmoves = [p for p in talliedScorers if p[1].moveLastTurn == 0]\n finalChoice = Rmoves[0]\n elif tally[1] and tally[1] > tally[0] and tally[1] > tally[2]:\n Pmoves = [p for p in talliedScorers if p[1].moveLastTurn == 1]\n finalChoice = Pmoves[0]\n elif tally[2] and tally[2] > tally[0] and tally[2] > tally[1]:\n Smoves = [p for p in talliedScorers if p[1].moveLastTurn == 2]\n finalChoice = Smoves[0]\n else: \n # there are still ties so we choose at random\n random = rps.random() % len(talliedScorers)\n finalChoice = talliedScorers[random]\n \n chosenPredictor = finalChoice[1]\n rating = finalChoice[0] \n \n if Debug:\n currentTurn = rps.getTurn()\n print(\"currentTurn\", currentTurn)\n for p in talliedScorers:\n print (\"%s (%i) Wilson Rating: %.3f. 
Confidence: %.3f Score +%i/-%i\" % (p[1].name, p[1].moveLastTurn, p[0], p[1].confidence, p[1].scoreWins, p[1].scoreLosts))\n \n input() \n\n return chosenPredictor, rating", "def variable_ranking(self):\n self.grow_trees()\n dist_classes = self.dist_classes\n oob = self.forest.oob_set_generator()\n oob_length, First, elt_vals, var_vals = len(oob), True, {}, {}\n succ_rate, dist_succ_rate, dist_order = 0, 0, 0\n for var in self.variables:\n var_range = list(variable_range(self.data, var))\n range_len = len(var_range)\n print var\n permution = None\n permuted_succ, perm_dist_succ = 0, 0\n for elts in oob:\n if First:\n actual = self.data[elts][self.prediction_index]\n elt_vals[elts] = actual\n predicted = self.forest.test_predict(self.data[elts], elts)\n if actual in dist_classes:\n dist_order += 1\n if actual == predicted:\n succ_rate += 1\n if actual in dist_classes:\n dist_succ_rate += 1\n if var[1] == 'd':\n permution = int(math.floor(uniform(0, 1)*range_len))\n permution = var_range[permution]\n else:\n permution = uniform(0, 1)*(var_range[1] - var_range[0])\n perm_tuple = self.data[elts][:var[0]] + [permution] + self.data[elts][var[0]+1:]\n permuted_prediction = self.forest.predict(perm_tuple)\n actual = elt_vals[elts]\n if actual == permuted_prediction:\n permuted_succ += 1\n if actual in dist_classes:\n perm_dist_succ += 1\n if First:\n succ_rate = float(succ_rate)/oob_length\n dist_succ_rate = float(dist_succ_rate)/dist_order\n First = False\n permuted_succ = float(permuted_succ)/oob_length\n perm_dist_succ = float(perm_dist_succ)/dist_order\n print \"Originally a \", succ_rate, \" success rate, with permution to \", permuted_succ\n print \"A difference of \", succ_rate - permuted_succ\n print \"WRT Distinguised classes, a success rate of:\", dist_succ_rate, 'with permution to ', perm_dist_succ\n print \"A difference of \", dist_succ_rate - perm_dist_succ\n var_vals[var] = succ_rate - permuted_succ\n var_vals[(var, 'd')] = dist_succ_rate - perm_dist_succ\n var_vals = sorted(var_vals.items(), key=lambda x: x[1], reverse=True)\n for x in var_vals:\n print x[0], x[1]", "def get_rank() -> int:\n return collective.get_rank()", "def evaluateAll(population: list):\n worst = 0\n best = sys.maxsize\n sum = 0\n probabilites = []\n for i in range(len(population)):\n eval = population[i][1]\n if eval > worst:\n worst = eval\n if eval < best:\n best = eval\n for j in range(len(population)):\n fitness = updateFitness(population[j], worst)\n sum += fitness\n for k in range(len(population)):\n prob = updateProb(population[k], sum)\n probabilites.append(prob)\n\n print(\"worst chromosome makespan:\", worst, \"best chromosome makespan:\",best,file=out_file)\n return probabilites", "def quaternary_tournament(population, scores, next_gen_number, random_seed=42):\n\n np.random.seed(random_seed)\n\n indices = list(range(len(population)))\n indices_array = np.array(indices)\n\n selected = []\n for i in range(next_gen_number):\n best_score = math.inf\n picked = None\n selected_indices = np.random.choice(indices_array, size=4)\n\n for indx in selected_indices:\n if scores[indx] < best_score:\n best_score = scores[indx]\n picked = population[indx]\n\n selected.append(picked)\n\n return selected", "def rank_name_generator(name):\n roman_numbers = [\"I\", \"II\", \"III\", \"IV\", \"V\", \"VI\", \"VII\"]\n ranks = [\"{} {}\".format(name, n) for n in roman_numbers]\n return ranks", "def personal_best(scores):\n# return sorted(scores, reverse=True)[0]\n return max(scores)", "def unrank(self, rank, n):\n n, rank = 
as_int(n), as_int(rank)\n L = defaultdict(int)\n for i in range(n - 3, -1, -1):\n L[i] = rank % n\n rank = (rank - L[i])//n\n return Prufer([L[i] for i in range(len(L))])", "def rank(self):\n \n if self.__rank:\n return self.__rank\n flush = True\n straight = False\n last = None\n merged = {}\n for c in self.__cards:\n if last:\n if flush and c.suit != last.suit:\n flush = False\n last = c\n if c.value in merged:\n merged[c.value] = merged[c.value] + 1\n else:\n merged[c.value] = 1\n if (len(merged)) == 5:\n # All unique cards, check for a straight\n if self.__cards[0].value - self.__cards[4].value == 4:\n straight = True\n if self.__cards[4].value == 2 and self.__cards[1].value == 5 and self.__cards[0].value == 14:\n straight = True\n # Set the value of the ace to 1 and resort so hand comparisons work correctly\n self.__cards[0].value = 1\n self.__cards = sorted(self.__cards, reverse=True)\n if straight and flush:\n if self.__cards[0].value == 14:\n self.__rank = Hand.ROYAL_FLUSH\n else:\n self.__rank = Hand.STRAIGHT_FLUSH\n elif flush:\n self.__rank = Hand.FLUSH\n elif straight:\n self.__rank = Hand.STRAIGHT\n else:\n self.__rank = Hand.HIGH_CARD\n self.__values = [c.value for c in self.__cards]\n else:\n multiples = [m for m in sorted(merged.items(), key = operator.itemgetter(1), reverse = True) if m[1] > 1]\n if len(multiples) > 1:\n if multiples[0][1] == multiples[1][1]:\n self.__rank = Hand.TWO_PAIRS\n else:\n self.__rank = Hand.FULL_HOUSE \n elif multiples:\n if multiples[0][1] > 3:\n self.__rank = Hand.FOUR_OF_A_KIND\n elif multiples[0][1] == 3:\n self.__rank = Hand.THREE_OF_A_KIND\n else:\n self.__rank = Hand.ONE_PAIR\n mvalues = sorted([m[0] for m in multiples], reverse=True)\n self.__values = mvalues + [c.value for c in self.__cards if c.value not in mvalues]\n if not self.__rank:\n self.__rank = Hand.HIGH_CARD\n\n return self.__rank", "def prufer_rank(self):\n r = 0\n p = 1\n for i in range(self.nodes - 3, -1, -1):\n r += p*self.prufer_repr[i]\n p *= self.nodes\n return r", "def spearman(mystery_ranks, language_ranks):\n\tspearman_numbers = [] \n\tfor language in language_ranks:\n\t\tnumber = spearman_correlation(language, mystery_ranks)\n\t\tspearman_numbers.append(number)\n\n\treturn spearman_numbers", "def inverse_rank_norm(values):\n #values = pd.Series([5, 7, 2, 1, 1])\n quantiles = (values.rank()-0.5)/(len(values))\n return ss.norm.ppf(quantiles)", "def rank_candidates(table):\n ranking = []\n\n # get list of all candidates who received a vote\n full_list = elim_dupe([name for vote in table for name in vote])\n # print full_list\n \n while len(ranking) < len(full_list):\n \n # All unranked candidates are considered eligible\n eligible = [name for name in full_list if name not in ranking]\n \n while True:\n \n # Remove ineligible and eliminated candidates from votes\n temp_ballots = [[name for name in vote if name in eligible] for vote in table]\n \n # If no candidates on the ballot are eligible and the ballot does not have\n # \"no confidence\" written on it, the ballot is discarded and not considered a vote.\n temp_ballots = [vote for vote in temp_ballots if len(vote) > 0]\n\n total_votes = len(temp_ballots)\n\n if total_votes == 0:\n return ranking\n\n top_choices = [vote[0] for vote in temp_ballots]\n \n # All ballots are considered to be a vote for the\n # highest-ranked eligible candidate on the ballot.\n vote_count = {name: top_choices.count(name) for name in eligible}\n print vote_count\n winner = [k for k in vote_count if (vote_count[k]*2) > total_votes]\n\n if 
len(winner) > 0:\n # If a single candidate has a majority of the\n # votes, they receive the next highest ranking\n if winner[0] == NO_CONFIDENCE:\n return ranking\n \n ranking += winner\n \n break;\n\n vote_count.pop(NO_CONFIDENCE, None)\n\n # If no single candidate has a majority of the votes,\n # then one will be deemed ineligible.\n\n min_votes = vote_count[min(vote_count, key=vote_count.get)]\n \n least_voted = {k:vote_count[k] for k in vote_count if vote_count[k] == min_votes}\n \n # If a single candidate has the least amount of votes, they become ineligible,\n while len(least_voted) > 1:\n temp_ballots = [vote[1:] for vote in temp_ballots if len(vote[1:]) > 0]\n if len(temp_ballots) == 0:\n return ranking\n next_choices = [vote[0] for vote in temp_ballots if vote[0] in least_voted]\n least_voted = {name: (next_choices.count(name) + least_voted[name]) for name in least_voted}\n min_votes = least_voted[min(least_voted, key=least_voted.get)]\n least_voted = {k: least_voted[k] for k in least_voted if least_voted[k] == min_votes}\n \n remove = least_voted.keys()[0]\n eligible = [name for name in eligible if name != remove]\n\n\n return ranking", "def rank_members(game_obj, team_name):\n clear_rating(game_obj)\n team = getattr(game_obj, team_name)\n pool = game_obj.pool\n map = game_obj.map\n for member in pool.members:\n for dimension in dimension_list:\n if getattr(team, dimension) < getattr(map, dimension):\n setattr(member, 'rating', getattr(member, 'rating') + getattr(member, dimension))\n else:\n setattr(member, 'rating', getattr(member, 'rating') + (getattr(member, dimension) * 0.2))\n return game_obj.pool.max_members()", "def getHighestRank_Toilet(self):\n\n # filter out low confidences\n #maxConfidence = max(self.Predictors, key=operator.attrgetter('confidence'))\n #p = [p for p in self.Predictors if p.confidence == maxConfidence]\n \n \n p = self.Predictors\n \n if len(p) == 1:\n # only one predictor has high confidence\n chosenPredictor = p[0]\n elif len(p) > 1:\n random.shuffle(p, random = rps.randomRange)\n \n # drop the first 37% and grab the best \n drop = round(len(p) * 0.37) - 1\n initial = p[:drop]\n maxConfidence = max(initial, key=operator.attrgetter('confidence'))\n maxConfidence = maxConfidence.confidence\n \n toCheck = p[drop:]\n for p in toCheck:\n if p.confidence >= maxConfidence:\n chosenPredictor = p\n break\n else:\n chosenPredictor = toCheck[-1]\n \n rankConfidence = chosenPredictor.confidence\n return chosenPredictor, rankConfidence", "def _sub_ranker_top(self,numbers_frequency):\n rank=2\n\n if max(numbers_frequency.loc[numbers_frequency>0].index) == 14:\n rank=1\n\n return rank", "def reranked_edges(self, edges):\n return sorted(self.rescored_edges(edges), key=lambda edge: edge.score, reverse=True)", "def gf2_rank(rows):\r\n rank = 0\r\n while rows:\r\n pivot_row = rows.pop()\r\n if pivot_row:\r\n rank += 1\r\n lsb = pivot_row & -pivot_row\r\n for index, row in enumerate(rows):\r\n if row & lsb:\r\n rows[index] = row ^ pivot_row\r\n return rank", "def sim_ranks(query, database):\n distance = compute_sim(query, database)\n return np.argsort(-distance, axis=0)", "def source_rankings_alexa_rank_max(self, source_rankings_alexa_rank_max):\n\n self._source_rankings_alexa_rank_max = source_rankings_alexa_rank_max", "def rankPairs (self):\n def key (matrix, pair):\n # majority is positive, we want larger ones first\n major = matrix[pair[0]][pair[1]]\n # minority is negative because we want the smaller ones first\n minor = -1*matrix[pair[1]][pair[0]]\n return 
(major,minor)\n\n self.pairs = [(x,y) for x in self.poller.candidates for y in self.poller.candidates if x != y]\n matrix = self.poller.voteMatrix()\n # reverse=true to indicate descending sort\n self.pairs.sort(key=lambda pair: key(matrix,pair), reverse=True)\n self.weights = { pair : key(matrix,pair) for pair in self.pairs }\n self.pairs = [pair for pair in self.pairs if self.weights[pair][0] > -1*self.weights[pair][1]]", "def as_paired_ranks(x, y):\n n = len(x)\n paired = zip(x,y)\n x = list(x)\n y = list(y)\n x.sort()\n y.sort()\n rank_val_map_x = dict(zip(x, range(n)))\n rank_val_map_y = dict(zip(y, range(n)))\n ranked = []\n for i in range(n):\n ranked += [[rank_val_map_x[paired[i][0]], rank_val_map_y[paired[i][1]]]]\n return ranked" ]
[ "0.673976", "0.67047715", "0.6614033", "0.6538433", "0.6468819", "0.63788843", "0.63333786", "0.63292336", "0.62728506", "0.62722045", "0.62637275", "0.6252744", "0.6246774", "0.6220911", "0.621737", "0.6209077", "0.618397", "0.6162317", "0.6145225", "0.6116494", "0.61029476", "0.60788256", "0.6077375", "0.6055757", "0.60540545", "0.60349864", "0.6001205", "0.5992214", "0.59857064", "0.5953704", "0.5938026", "0.58979", "0.5882655", "0.5878872", "0.5869238", "0.58633876", "0.58574706", "0.58491325", "0.5829578", "0.5828106", "0.5815577", "0.5803888", "0.58012855", "0.5780951", "0.5774257", "0.57680684", "0.5763373", "0.57585895", "0.57581365", "0.57023156", "0.5702088", "0.56956244", "0.5688567", "0.5679116", "0.5666397", "0.56641996", "0.5664088", "0.5662325", "0.5654389", "0.5650522", "0.5641263", "0.5635652", "0.56335807", "0.5632932", "0.56217355", "0.56179756", "0.56114125", "0.5589398", "0.5589371", "0.5584669", "0.557954", "0.55782235", "0.557729", "0.55663514", "0.5563921", "0.55623066", "0.55588055", "0.55556464", "0.55405957", "0.5534868", "0.55328965", "0.55293953", "0.55252886", "0.5518335", "0.55123246", "0.5505703", "0.5501361", "0.55007005", "0.54984945", "0.54976857", "0.5488927", "0.54830754", "0.5480732", "0.54765457", "0.546107", "0.54555124", "0.5443303", "0.5441092", "0.5441003", "0.54283863" ]
0.76253545
0
Get the rank for a given number of points
def get_rank(points: int, cutoffs: List[int]) -> int: rank = 0 for i, cutoff in enumerate(cutoffs): if points < cutoff: if i == 0: break else: rank = i - 1 break else: rank = RANK_COUNT - 1 return rank
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_rank(self, points):\n sql_command = \"SELECT * FROM points WHERE amount > ?;\"\n cursor, connection = self.execute_command_get_connection(sql_command, [points])\n\n all = cursor.fetchall()\n cursor.close()\n connection.close()\n return len(all) + 1", "def rank():\n return 0", "def _get_rank(self,fitness):\n # infact you can get the order or rank by only once sort.\n rank=fitness[:,0].argsort().argsort() # [n]\n return rank", "def _rank(self, ranking, n):\n return nlargest(n, ranking, key=ranking.get)", "def rank(self):\n return self.lib.calculate_rank()", "def _rank(self, ranking, n):\n return nlargest(n, ranking, key=ranking.get)", "def get_ranked_points(zpoints, dsq):\n pos_map = calc_positions(zpoints, dsq)\n rpoints = calc_ranked_points(pos_map, dsq)\n return rpoints", "def rank() -> int:\n return dist.get_rank() if dist.is_initialized() else 0", "def get_rank(self) -> int:\r\n return self.rank", "def get_rank(self) -> int:\n return dist.get_rank()", "def _rank(self):\r\n return sorted(self.player_points.items(),key=lambda x:x[1],reverse=True)", "def __rank__(self) -> int:", "def rank(self):\n rank = 0\n rho = self.array_form[:]\n n = self.size - 1\n size = n + 1\n psize = int(ifac(n))\n for j in xrange(size - 1):\n rank += rho[j]*psize\n for i in xrange(j + 1, size):\n if rho[i] > rho[j]:\n rho[i] -= 1\n psize //= n\n n -= 1\n return rank", "def get_rank() -> int:\n return collective.get_rank()", "def get_ranks(d): \n raise NotImplementedError(\"Problem 3 Incomplete\")", "def points(self):\r\n\t\tif self.rank() in self.point_sysm:\r\n\t\t\treturn self.point_sysm[self.rank()]\r\n\t\telse:\r\n\t\t\treturn (self.rank() + 2)", "def get_num_hit_rank(boxes_truth, boxes_pred, rank):\n\n def is_hit(box_truth, box_pred):\n return is_label_match_rank(box_truth, box_pred, rank)\n\n return get_num_hit(boxes_truth, boxes_pred, is_hit)", "def prufer_rank(self):\n r = 0\n p = 1\n for i in range(self.nodes - 3, -1, -1):\n r += p*self.prufer_repr[i]\n p *= self.nodes\n return r", "def get_rank(self, score, answer, entities_space, num_ent):\n if answer not in entities_space:\n rank = num_ent\n else:\n answer_prob = score[entities_space.index(answer)]\n score.sort(reverse=True)\n rank = score.index(answer_prob) + 1\n return rank", "def get_rank(self):\r\n return self.rank", "def get_hs_rank(self, points):\n p = points\n rank = self.RANKS\n while p > 0 and rank > 0:\n p = p - self.BASE * math.pow(self.FACTOR, (self.RANKS - rank))\n rank = rank - 1\n\n if rank > 0:\n return str(rank)\n else:\n return str(self.get_rank(points)) + \" Legend\"", "def get_rank(self):\n return self.rank", "def get_rank(self):\n return int(self._rank)", "def points(self):\r\n\t\tif self.rank() >= 9:\r\n\t\t\treturn self.point_sysm[self.rank()]\r\n\t\telse:\r\n\t\t\treturn 0", "def rank(self):\r\n\t\trank = self.n % 13\r\n\t\treturn rank", "def getRank(self):\r\n return self.rank", "def get_rank(self):\n return self.__rank", "def get_rank(self, pb):\n\n for rank in self.RANKS:\n start = self.RANKS[rank][\"ProgressStart\"]\n # 1 is not subtracted as we're calling range\n end = start + self.RANKS[rank][\"Progress\"]\n if pb in range(start, end):\n return int(rank)\n else:\n return 35", "def rank_in_club(user, club):\n posel_ids = [p.id for p in club.posel_set.all()]\n return rank(user, posel_ids)", "def rank(self) -> tskit.Rank:\n return combinatorics.RankTree.from_tsk_tree(self).rank()", "def findRank(e, values):\n\tcount = 1\n\tfor ve in values:\n\t\tif ve < e:\n\t\t\tcount += 1\n\treturn count", "def 
getPoints(self):\n count = 0\n for card in self.cards:\n if card.rank > 9:\n count += 10\n elif card.rank == 1:\n count += 11\n else:\n count += card.rank\n # Deduct 10 if Ace is available and needed as 1\n for card in self.cards:\n if count <= 21:\n break\n elif card.rank == 1:\n count -= 10\n return count", "def getRank(self):\n return self.rank", "def _rank_2_n(my_type: LieType, lie_rank: Nat) -> Nat:\n if my_type is LieType.A:\n # A lie_rank corresponds to SL(lie_rank+1)\n n_val = lie_rank+1\n if lie_rank < 1:\n raise ValueError(\"Rank is too small\")\n elif my_type is LieType.B:\n # B lie_rank corresponds to SO(2*lie_rank+1)\n n_val = 2*lie_rank+1\n if lie_rank < 2:\n raise ValueError(\"Rank is too small\")\n elif my_type is LieType.C:\n # C lie_rank corresponds to Sp(2*lie_rank)\n n_val = 2*lie_rank\n if lie_rank < 2:\n raise ValueError(\"Rank is too small\")\n elif my_type is LieType.D:\n # D lie_rank corresponds to SO(2*lie_rank)\n n_val = 2*lie_rank\n if lie_rank < 3:\n raise ValueError(\"Rank is too small\")\n else:\n raise ValueError(\n \"Lie type must be one of the 4 classical families\")\n return n_val", "def rank(self):\n return self.matrix().rank()", "def determine_rank(self, X, err):\n singularValues,_,_,_ = self.compute_svd(X,k=-1)\n ratio = np.array([np.linalg.norm(singularValues[k:]) / np.linalg.norm(singularValues) for k in\n range(len(singularValues) - 1, 0, -1)])\n find_idx = numpy.nonzero(ratio <= err)\n rank = find_idx[0]\n if self.global_rank==0: print('Estimated rank=',rank)\n return rank", "def rank(self):\n if self._rank is None:\n self._rank = self.prufer_rank()\n return self._rank", "def rank(self) -> int:\n return self._rank", "def getRank(self):\n return self._rank", "def rank(self, current_order_by_value: Comparable, current_row_number: int) -> int:", "def workout_rank(a, rank):\r\n # Check if workout score is empty\r\n if pd.isnull(a):\r\n return np.nan\r\n else:\r\n return int(rank)", "def spatial_rank(self) -> int:\n return self.shape.spatial.rank", "def rank(self, value):\n i = 0\n n = len(self._tree)\n rank = 0\n count = 0\n while i < n:\n cur = self._tree[i]\n if value < cur:\n i = 2 * i + 1\n continue\n elif value > cur:\n rank += self._counts[i]\n # subtract off the right tree if exists\n nexti = 2 * i + 2\n if nexti < n:\n rank -= self._counts[nexti]\n i = nexti\n continue\n else:\n return (rank, count)\n else: # value == cur\n count = self._counts[i]\n lefti = 2 * i + 1\n if lefti < n:\n nleft = self._counts[lefti]\n count -= nleft\n rank += nleft\n righti = lefti + 1\n if righti < n:\n count -= self._counts[righti]\n return (rank, count)\n return (rank, count)", "def rank(self, value):\n i = 0\n n = len(self._tree)\n rank = 0\n count = 0\n while i < n:\n cur = self._tree[i]\n if value < cur:\n i = 2 * i + 1\n continue\n elif value > cur:\n rank += self._counts[i]\n # subtract off the right tree if exists\n nexti = 2 * i + 2\n if nexti < n:\n rank -= self._counts[nexti]\n i = nexti\n continue\n else:\n return (rank, count)\n else: # value == cur\n count = self._counts[i]\n lefti = 2 * i + 1\n if lefti < n:\n nleft = self._counts[lefti]\n count -= nleft\n rank += nleft\n righti = lefti + 1\n if righti < n:\n count -= self._counts[righti]\n return (rank, count)\n return (rank, count)", "def rank(self):\n return self._rank", "def rank(self):\n return self._rank", "def rank(self):\n return self._rank", "def rank(self):\n return self._rank", "def rank(self):\n return self._rank", "def rank(x):\n x = asanyarray(x)\n\n if x.dtype.type is float64:\n value 
= _rank(x)\n elif x.dtype.type is float32:\n value = _rankf(x)\n else:\n raise TypeError(\"%s not in (float64, float32)\" % x.dtype)\n\n return value", "def sum_points(self) -> int:\n return sum([card.rank_value for card in self.deck.cards])", "def rankNeighbors(Data):\r\n strokeDist = []\r\n for i in range(len(Data)):\r\n strokeDist.append([])\r\n index = 0\r\n for point1 in Data:\r\n dist = []\r\n index1=0\r\n for point2 in Data:\r\n #dist.append(math.sqrt((center1[0]-center2[0])**2+(center1[1]-center2[1])**2))\r\n dist.append((index1,math.sqrt((point1[0]-point2[0])**2+(point1[1]-point2[1])**2+(point1[2]-point2[2])**2)))\r\n index1+=1\r\n #x = copy.deepcopy(dist)\r\n #print(x)\r\n dist.sort(key= lambda x:x[1])\r\n #print(x)\r\n # Get rank for each element\r\n idx1 =0\r\n for e in dist:\r\n #i = x.index(e)\r\n strokeDist[index].append(e)\r\n idx1 +=1\r\n index+=1\r\n return strokeDist", "def find_ranking(game_id):\r\n\r\n scores = []\r\n\r\n games = Game.query.all()\r\n\r\n target_score = get_avg_score(game_id)\r\n\r\n for game in games:\r\n scores.append(get_avg_score(game.game_id))\r\n \r\n rankings = sorted(scores, key=None,reverse=True)\r\n\r\n target_ranking = rankings.index(target_score)\r\n\r\n return target_ranking + 1", "def calc_rank(id=13197473):\r\n player_url = urllib.parse.urlparse(\"http://osu.ppy.sh/pages/include/profile-general.php?u=player_id&m=0\".replace('player_id', str(id)))\r\n page = urlopen(player_url.geturl())\r\n soup = BeautifulSoup(page, features=\"html.parser\")\r\n table_divs = soup.findAll('div', attrs={'class': 'profileStatLine'})\r\n\r\n import re\r\n pattern = '\\(#\\d*,*\\d+\\)'\r\n for div in table_divs:\r\n for childdiv in div.find_all('b'):\r\n result = re.search(pattern, str(childdiv.text))\r\n my_ranking = int(result.group(0).replace(',', '').replace(\"(#\", '').replace(\")\", ''))\r\n break\r\n break\r\n return my_ranking", "def get_rank(score):\n if score in range(0, 500):\n return RANKTYPES[0]\n elif score in range(500, 1500):\n return RANKTYPES[1]\n elif score in range(1500, 2000):\n return RANKTYPES[2]\n elif score in range(2000, 2500):\n return RANKTYPES[3]\n elif score in range(2500, 3000):\n return RANKTYPES[4]\n elif score in range(3000, 4000):\n return RANKTYPES[5]\n elif score in range(4000, 5500):\n return RANKTYPES[6]\n elif score > 5500:\n return RANKTYPES[7]", "def recip_rank(recs, truth):\n good = recs['item'].isin(truth.index)\n npz, = np.nonzero(good)\n if len(npz):\n return 1.0 / (npz[0] + 1.0)\n else:\n return 0.0", "def rank():\n return int(os.environ['RANK'])", "def ranking(orig_data):\n data = np.copy(orig_data)\n values = np.sort(data)\n rank = np.zeros(data.shape)\n r = 0\n for i in range(values.shape[0]):\n for j in range(data.shape[0]):\n if data[j] == values[i]:\n rank[j] = r\n data[j] = 9223372036854775807 # MaxInt\n break\n if i < values.shape[0]-1 and values[i] < values[i+1]:\n r = i + 1\n return rank", "def get_rank_probabilities(n: int) -> List[float]:\n alpha = 3.5\n ranks = [1 / i**alpha for i in range(1, n + 1)]\n\n return [r / sum(ranks) for r in ranks]", "def get_rank(array: Union[np.ndarray, List]) -> int:\n return len(array) - np.argsort(array).argsort()", "def _rank(measure):\n sort_idx = np.argsort(-measure)\n ranks = np.empty(len(measure), int)\n ranks[sort_idx] = np.arange(1, len(measure)+1)\n return ranks", "def rank_transform(X):\n return np.apply_along_axis(scipy.stats.rankdata, 0, X)", "def rank(x: np.ndarray):\n assert x.ndim == 1\n ranks = np.empty(len(x), dtype=int)\n ranks[x.argsort()] = 
np.arange(len(x))\n return ranks", "def getRank(self, steamid):\r\n if self.__contains__(steamid):\r\n return self.ranks.index(steamid) + 1\r\n return self.__len__()", "def get_rank(self):\n \n if self.rank == None:\n self.rank = self.main_ranker(self.string)\n \n return self.rank", "def rank_features(features):\n keys_order = features.keys()\n # make into array\n # ft_array = np.stack(tuple([ft for ft in features.values()]))\n rank = weight_function(\n loudness=features['loudness'],\n peak_num=features['peak_num'],\n means=features['means'],\n means_trend=features['means_trend'],\n peaks_trend=features['peaks_trend'],\n )\n return rank", "def _compute_relative_leaderboard_indexes(ranking, size):\n if ranking == 0 or ranking == 1:\n return (0, 5)\n elif ranking == size or ranking == size-1:\n return (max(0, size-5), size)\n else:\n return (max(0, ranking-2), max(size, ranking+3))", "def determine_winner1(self): \r\n sorted_player_rank = self._rank()\r\n print(f\"sorted player rank: {sorted_player_rank}\")\r\n print(f\"winner is player {sorted_player_rank[0]}: with points {sorted_player_rank[0][1]}\")", "def _get_local_rank_size(comm):\n this_node = platform.node()\n ranks_nodes = comm.allgather((comm.Get_rank(), this_node))\n node2rankssofar = collections.defaultdict(int)\n local_rank = None\n for (rank, node) in ranks_nodes:\n if rank == comm.Get_rank():\n local_rank = node2rankssofar[node]\n node2rankssofar[node] += 1\n assert local_rank is not None\n return local_rank, node2rankssofar[this_node]", "def rank(self):\n\n if self._rank >= 0:\n return self._rank\n\n reduced, operations = self.to_row_echelon()\n non_leading_rows = 0\n for i in range(self.rows, 0, -1):\n if not reduce(lambda x,y: x or y, reduced.row(i)):\n non_leading_rows += 1\n else:\n break\n\n self._rank = self.rows - non_leading_rows\n return self._rank", "def spearman_rank_unique(X,Y,n):\n\n rank_X = get_rank(X, n)\n rank_Y = get_rank(Y, n)\n\n twos = [2]*n\n diff = map(exponent, map(subtract, rank_X, rank_Y), twos)\n\n return 1 - 6*float(sum(diff))/(n*(n**2 - 1))", "def calc_ranked_points(pos_map, dsq_list):\n\n rpoints = {}\n\n for pos, zones in pos_map.items():\n # remove any that are dsqaulified\n # note that we do this before working out the ties, so that any\n # dsq tie members are removed from contention\n zones = [ z for z in zones if z not in dsq_list ]\n if len(zones) == 0:\n continue\n\n # max points is 4, add one because pos is 1-indexed\n points = (4 + 1) - pos\n # Now that we have the value for this position if it were not a tie,\n # we need to allow for ties. 
In case of a tie, the available points\n # for all the places used are shared by all those thus placed.\n # Eg: three first places get 3pts each (4+3+2)/3.\n # Rather than generate a list and average it, it's quicker to just\n # do some maths using the max value and the length of the list\n points = points - ( (len(zones) - 1) / 2.0 )\n for z in zones:\n rpoints[z] = points\n\n # those that were dsq get 0\n for z in dsq_list:\n rpoints[z] = 0.0\n\n return rpoints", "def reciprocal_rank(ranking, references, atk=None):\n for k, prediction in enumerate(ranking[:atk], 1):\n if prediction in references:\n return 1.0 / k\n return 0.0", "def test_rank(self):\n self.assertEqual(self.vectors.rank('dog.n.01', 'dog.n.01'), 1)\n self.assertEqual(self.vectors.rank('dog.n.01', 'carnivore.n.01'), 3)", "def kind(n, ranks):\r\n for i in ranks:\r\n if n == ranks.count(i):\r\n return i", "def mrr_at_k(self, positions, k, num_samples):\n # positions_at_k = [p for p in positions if p <= k]\n positions_at_k = [p if p <= k else 0 for p in positions]\n rrank = 0.0\n for pos in positions_at_k:\n if pos != 0:\n rrank += 1.0 / pos\n\n return rrank / num_samples", "def alexa_rank(url):\n return ALEXA_MAP.index(url)", "def get_maximum_rank(score):\n\tscores = [0, 300, 450, 600, 750] # ranges 0-299, 300-449, etc.\n\trank = None\n\tfor i in range(len(scores)):\n\t\tif score >= scores[i]:\n\t\t\trank = i + 1\n\n\treturn rank", "def _compute_ranks(df, lower_better=True):\n # return df.rank(axis=1, numeric_only=True, ascending=lower_better)\n return df.rank(axis=1, numeric_only=True, ascending=lower_better, method='min')", "def ranks(inputs, axis=-1):\n return 1 + tf.cast(\n tf.argsort(tf.argsort(inputs, axis=axis), axis=axis), dtype=inputs.dtype)", "def rank_yx(self, rankyx, rank_to_yx=1):\r\n if rank_to_yx == 1:\r\n x = int(rankyx) % int(self.shapes[1])\r\n y = (rankyx - x) / int(self.shapes[1])\r\n return [y, x] # More convenient to return y, x\r\n \r\n if rank_to_yx == 0: # that means transform yx to rank, expecting rankyx to be a list, may not be necessary\r\n rankyx = rankyx[0] * int(self.shapes[1]) + rankyx[1]\r\n return rankyx # returns back a float\r", "def score_ap_from_ranks_1(ranks, nres):\n\n # accumulate trapezoids in PR-plot\n ap = 0.0\n\n # All have an x-size of:\n recall_step = 1.0 / nres\n\n for ntp, rank in enumerate(ranks):\n\n # y-size on left side of trapezoid:\n # ntp = nb of true positives so far\n # rank = nb of retrieved items so far\n if rank == 0:\n precision_0 = 1.0\n else:\n precision_0 = ntp / float(rank)\n\n # y-size on right side of trapezoid:\n # ntp and rank are increased by one\n precision_1 = (ntp + 1) / float(rank + 1)\n\n ap += (precision_1 + precision_0) * recall_step / 2.0\n\n return ap", "def precision_recall_from_ranking(ranking, position):\n if position == 0:\n precision = 1.0\n recall = 0.0\n else:\n ranking = np.array(ranking)\n precision = (ranking[:position] == 1).sum() / position\n recall = (ranking[:position] == 1).sum() / (ranking == 1).sum()\n return precision, recall", "def rank(userid, args):\r\n testUserid = userid\r\n if len(args):\r\n testUserid = es.getuserid(str(args))\r\n if not es.exists('userid', testUserid):\r\n testUserid = userid\r\n player = players[testUserid]\r\n tokens = {}\r\n tokens['name'] = player['name']\r\n tokens['level'] = player['level']\r\n tokens['xp'] = player['xp']\r\n tokens['nextxp'] = (player['level'] - 1) * int(xpIncrement) + int(startXp)\r\n tokens['credits'] = player['credits']\r\n tokens['rank'] = ranks.getRank(player['steamid'])\r\n 
tokens['total'] = len( ranks )\r\n for tellUserid in es.getUseridList():\r\n tell(tellUserid, 'rank', tokens)", "def local_rank():\n return int(os.environ['LOCAL_RANK'])", "def rank(self):\n msg = \"Use .ndim instead\"\n warnings.warn(msg, FutureWarning)\n return self._hist.rank()", "def competitionRanking(groups, setRank):\n rank = 1\n for k, g in groups:\n cnt = 0\n for item in g:\n setRank(item, rank)\n cnt += 1\n rank += cnt", "def group_rank(self):\n return self._grank", "def fetch_points(self):\n soup = self.get_soup(\"highscore\")\n\n # find correct line in rankings table\n line = soup.find(\"tr\", {\"class\": \"myrank\"})\n\n rank = int(line.find(\"td\", {\"class\": \"position\"}).contents[0].strip())\n points = int(line.find(\"td\", {\"class\": \"score\"}).contents[0].strip().replace(\".\", \"\"))\n\n return OrderedDict([(\"ranking\", rank), (\"points\", points)])", "def rank(self, k, arr):\n\n # arr must be sorted\n if not(arr[0] < arr[len(arr)//2] < arr[len(arr)-1]):\n raise ValueError(\"Array must be sorted\")\n\n lo = 0\n hi = len(arr) - 1\n\n while lo <= hi:\n mid = lo + (hi - lo) // 2\n\n if k < arr[mid]:\n hi = mid - 1\n elif k > arr[mid]:\n lo = mid + 1\n else:\n return mid\n\n return -1", "def n_points(self) -> ir.IntegerValue:\n return ops.GeoNPoints(self).to_expr()", "def get_rank(self):\n details = self._tab.find(\"table\", class_=\"details\")\n rank, = self.rank_re.match(details.find(\"td\", class_=\"value\").get_text()).groups()\n return rank", "def get_rank():\n if not torch.distributed.is_available():\n return 0\n if not torch.distributed.is_initialized():\n return 0\n return torch.distributed.get_rank()", "def rank(self, current_order_by_value: Comparable, current_row_number: int) -> int:\n self.current_rank += current_order_by_value != self.previous_value\n return self.current_rank", "def mpi_rank(self, new_value):", "def chooseRank(array):\n \n array_len = array.shape[0]\n current_diff = 0\n idx = 0\n \n for i in range(1, array_len):\n diff = np.abs(array[i] - array[i-1])\n \n if diff > current_diff:\n current_diff = diff\n idx = i\n \n return idx", "def get_estimated_rank(self):\n # At the moment the rank returned by this function is normally too high for either\n # my machine or the tensorly library to handle, therefore I have made it just return 1 for right now\n\n I = len(self.tdt[0])\n J = len(self.tdt[0][0])\n K = len(self.tdt)\n\n if I == 1 or J == 1 or K == 1:\n return 1\n elif I == J == K == 2:\n return 2\n elif I == J == 3 and K == 2:\n return 3\n elif I == 5 and J == K == 3:\n return 5\n elif I >= 2*J and K == 2:\n return 2*J\n elif 2*J > I > J and K ==2:\n return I\n elif I == J and K == 2:\n return I\n elif I >= J*K:\n return J*K\n elif J*K - J < I < J*K:\n return I\n elif I == J*K - I:\n return I\n else:\n print(I, J, K, \"did not have an exact estimation\")\n return min(I*J, I*K, J*K)", "def rank_training(self, fleet):\n if (len(fleet.ships) == 0):\n return get_rank(0)\n ins = fleet.get_ship_instances()\n sum_lvl = sum(x.level for x in ins)\n avg_lvl = sum_lvl // len(fleet.ships)\n # auto S if fleet > 50% avg lvl of difficulty\n if (avg_lvl >= self.avg_lvl * 1.5):\n return get_rank(1)\n if (avg_lvl >= self.avg_lvl): # must succeed if fleet > avg lvl\n avg_weight = SUCCESS_THRESHOLD + (1 - SUCCESS_THRESHOLD) / 2\n wgt = random.gauss(avg_weight, (1 - SUCCESS_THRESHOLD) / 2) \\\n + inv_lerp(avg_lvl, self.avg_lvl, self.avg_lvl * 1.5) \\\n * (1 - SUCCESS_THRESHOLD) / 2\n wgt = max(SUCCESS_THRESHOLD, min(1.0, wgt))\n return get_rank(wgt)\n wgt = 
inv_lerp(avg_lvl, 0, self.avg_lvl) * (SUCCESS_THRESHOLD * 0.67)\n return get_rank(abs(random.gauss(0, SUCCESS_THRESHOLD * 0.33)) + wgt)", "def get_p_at_n_in_m(data, n, k, ind):\n pos_score = data[ind][0]\n curr = data[ind:ind + n]\n curr = sorted(curr, key=lambda x: x[0], reverse=True)\n if curr[k - 1][0] <= pos_score:\n return 1\n return 0", "def kind(n, ranks):\n # count2rank = {ranks.count(r):r for r in set(ranks)}\n # return count2rank.get(n,None)\n for r in ranks:\n if ranks.count(r)==n:\n return r\n return None" ]
[ "0.77205133", "0.7398081", "0.7045258", "0.70051664", "0.6996832", "0.6941575", "0.6934668", "0.6901205", "0.6886502", "0.6873956", "0.68593204", "0.68586534", "0.6857211", "0.684011", "0.68172395", "0.68114007", "0.68016917", "0.6763181", "0.6714917", "0.6704349", "0.6667565", "0.66063935", "0.6605187", "0.6545122", "0.65383613", "0.65299815", "0.64528286", "0.64409536", "0.64391875", "0.64135253", "0.6412606", "0.6412149", "0.64116484", "0.639169", "0.63828707", "0.63811684", "0.6370111", "0.6350162", "0.6324636", "0.6310646", "0.6298691", "0.6280046", "0.6278735", "0.6278735", "0.6278563", "0.6278563", "0.6278563", "0.6278563", "0.6278563", "0.62712055", "0.62603915", "0.6258705", "0.6177439", "0.6162917", "0.6125958", "0.6118122", "0.60771793", "0.6069738", "0.6069552", "0.60657406", "0.60507154", "0.60505635", "0.6028637", "0.6028599", "0.6021632", "0.602082", "0.6017139", "0.60134286", "0.60086876", "0.5973133", "0.5969076", "0.59542835", "0.59531087", "0.5938577", "0.5933048", "0.5926659", "0.5919384", "0.59107083", "0.5909036", "0.59037834", "0.5897458", "0.5876721", "0.5870967", "0.5860493", "0.58570504", "0.58525306", "0.58487934", "0.58403623", "0.58396965", "0.582458", "0.5814372", "0.5808111", "0.5800647", "0.57966727", "0.5789262", "0.57849455", "0.5783661", "0.5770702", "0.57523614", "0.57510084" ]
0.76279044
1
Yield successive n-sized chunks from l.
def chunks(l, n): for i in range(0, len(l), n): yield l[i:i + n]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _chunk(self, l, n):\n for i in range(0, len(l) + 1, n):\n yield l[i:i + n]", "def chunks(self, l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def __chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def get_chunks(self, l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i+n]", "def chunks(self, l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i+n]", "def _chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]", "def _chunks(l, n):\n\tfor i in range(0, len(l), n):\n\t\tyield l[i:i + n]", "def chunks(cls, l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, n):\n yield l[i::n]", "def chunks(self, l, n):\n yield l[:n-1]\n for i in range(n-1, len(l), n):\n yield l[i:i+n]", "def chunks(l: List, n: int):\n for i in range(0, len(l), n):\n yield l[i : i + n] # noqa: E203", "def chunks(l: List, n: int):\n for i in range(0, len(l), n):\n yield l[i : i + n] # noqa: E203", "def chunks(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n): # noqa: E741\n for i in range(0, len(l), n):\n yield l[i : i + n] # noqa: E203", "def chunks(_class, l, n):\n\t\t# CITE: http://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks-in-python\n\t for i in xrange(0, len(l), n):\n\t yield l[i:i+n]", "def chunks(l, n):\n\tfor i in xrange(0, len(l), n):\n\t\tyield l[i:i + n]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i+n]", "def chunk(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def _chunks(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i : i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i : i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, 
len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i: i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\r\n for i in xrange(0, len(l), n):\r\n yield l[i:i+n]", "def 
chunks(l, n):\n if n:\n for i in xrange(0, len(l), n):\n yield l[i:i + n]" ]
[ "0.8038751", "0.7924752", "0.7923082", "0.78850657", "0.78771824", "0.7815519", "0.77652335", "0.775546", "0.77440816", "0.77313656", "0.77284646", "0.77244157", "0.770246", "0.76888484", "0.76888484", "0.766384", "0.7656689", "0.7655605", "0.7655605", "0.76455575", "0.76455575", "0.76455575", "0.763581", "0.7546799", "0.75335324", "0.7529923", "0.7480811", "0.7473063", "0.7473045", "0.7473045", "0.74527764", "0.74527764", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.7448663", "0.74378693", "0.7437648", "0.7437648", "0.7437648", "0.7437648", "0.7437648", "0.7437648", "0.7437648", "0.7437648", "0.7437648", "0.7437648", "0.7437648", "0.74218327", "0.73889136" ]
0.7537613
27
Check if at top or bottom and move target
def update_target(self): self.check_top() self.check_bottom() self.update() self.screen.fill(self.target_color, self.rect)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_top(self):\n\t\tif self.rect.top <=0:\n\t\t\tself.target_direction = 1", "def move_towards(self, target_x, target_y, game_map, entities):\n path = game_map.compute_path(self.x, self.y, target_x, target_y)\n\n dx = path[0][0] - self.x\n dy = path[0][1] - self.y\n\n if game_map.walkable[path[1][0], path[1][1]] and \\\n not get_blocking_entities_at_location(entities, self.x + dx, self.y + dy):\n self.move(dx, dy)", "def move_up(self):\r\n if self.rect.top > 0:\r\n self.rect.top -= self.speed", "def move(self, target=None):\n visible_tiles = vision.vision(15, self.world_map, self.tile)\n visible_tiles = filter(BeesSprite.pollinated_filter, visible_tiles)\n target_tile = vision.find_target(visible_tiles, self.prey)\n if target_tile:\n move_to_tile = vision.approach(self.tile, target_tile, self.world_map)\n if self.is_movable_terrain(move_to_tile) and \\\n self.not_contains_sprite(move_to_tile, self.prey):\n if move_to_tile == target_tile:\n move_to_tile.contains_sprite.pollinate()\n AnimalSprite.move(self, move_to_tile)\n else:\n AnimalSprite.move(self)\n else:\n AnimalSprite.move(self)", "def hits_top_or_bottom(self):\n if self.y >= self.scene.screen.get_height() - self.image.get_height() or self.y <= 0:\n return True\n else:\n return False", "def move_to(self, target):\n self.map.breadth_first_search(self.position, target)\n path = self.map.get_path(target, self.position)\n for node in path[1:]:\n mask = (\n node.x - self.position.x,\n node.y - self.position.y\n )\n direction = self.MASKS[mask]\n self.move(direction)", "def move(self, target=None):\n visible_tiles = vision.vision(self.vision, self.world_map, self.tile)\n target_tile = vision.find_target(visible_tiles, self.prey)\n if target_tile:\n move_to_tile = vision.approach(self.tile, target_tile, self.world_map)\n if self.is_movable_terrain(move_to_tile) and self.not_contains_sprite(move_to_tile, self.prey):\n if move_to_tile == target_tile:\n move_to_tile.contains_sprite.die()\n AnimalSprite.move(self, move_to_tile)\n else:\n AnimalSprite.move(self)\n else:\n AnimalSprite.move(self)", "def move_up(self):\n #if user moves paddle right on top of screen, they won't be able to move it more upwards by using this if statement\n #SCREEN_HEIGHT - 20 = Exact number of pixels where paddle can stop exactly on top edge but still has its body fully shown\n if self.center.y < SCREEN_HEIGHT - 20:\n self.center.y += MOVE_AMOUNT", "def run(self, target):\n linear_dist = lambda x1, x2, y1, y2: math.sqrt((x1 - x2)**2 + \n (y1 - y2)**2)\n max_dist_to_target = linear_dist(self.x, target.x, \n self.y, target.y)\n possible_posn = [[1, 0], [-1, 0], [0, 1], [0, -1]]\n move_to_make = None\n\n for posn in possible_posn:\n if not self.handler.world.is_solid(self.x + posn[0], self.y + posn[1]):\n new_dist = linear_dist(self.x + posn[0], target.x, \n self.y + posn[1], target.y)\n if new_dist > max_dist_to_target:\n max_dist_to_target = new_dist\n move_to_make = posn\n\n if move_to_make:\n self.move(move_to_make[0], move_to_make[1])", "def move_down(self):\r\n if self.rect.bottom < BG_HEIGHT - 60:\r\n self.rect.top += self.speed", "def move_to_position2(self):", "def head_towards(self):\n dest = self.target_destination - self.location\n if dest.length() != 0:\n dest.scale_to_length(self.speed)\n dest.normalize()\n self.rect.left += dest.x\n self.rect.top += dest.y", "def move(self):\n \n # checks for bots nearby\n next_move = self.follow()\n \n # finds a random move if no bot\n if next_move is self.position:\n self.position = self.wander()\n else:\n 
self.position = next_move", "def move_to(self, target):\n # type: (RoomPosition) -> None\n hive = self.home.hive\n home = self.find_home()\n origin = self.find_origin()\n\n total_distance = hive.honey.find_path_length(origin, target, self.new_movement_opts())\n\n min_distance_from_home = Infinity\n min_distance_to_origin = Infinity\n min_distance_to_target = movement.chebyshev_distance_room_pos(self.members_movement_order()[0].pos, target)\n max_distance_to_target = -Infinity\n any_hostiles = False\n for member in self.members:\n distance_to_home = movement.chebyshev_distance_room_pos(member.pos, home)\n distance_to_origin = movement.chebyshev_distance_room_pos(member.pos, origin)\n distance_to_target = movement.chebyshev_distance_room_pos(member.pos, target)\n if distance_to_home < min_distance_from_home:\n min_distance_from_home = distance_to_home\n if distance_to_target > max_distance_to_target:\n max_distance_to_target = distance_to_target\n if distance_to_origin < min_distance_to_origin:\n min_distance_to_origin = distance_to_origin\n if len(member.room.find(FIND_HOSTILE_CREEPS)):\n any_hostiles = True\n\n if min_distance_to_origin > 100:\n mv_order = self.members_movement_order()\n self.set_origin(mv_order[len(mv_order) - 1].pos)\n if min_distance_from_home < 50 and (max_distance_to_target < total_distance / 2):\n self.log(\"move_to: chose stage 0 (minimum distance from home: {}, maximum distance from home: {},\"\n \" total distance: {})\"\n .format(min_distance_from_home, max_distance_to_target, total_distance))\n self.move_to_stage_0(target)\n elif min_distance_to_target < 300 and any_hostiles:\n self.move_to_stage_2(target)\n elif min_distance_to_target > 60 or max_distance_to_target > 200:\n # self.log(\"move_to: chose stage 1 (minimum distance from home: {}, total distance: {}, \"\n # \"minimum distance to target: {}, maximum distance to target: {})\"\n # .format(min_distance_from_home, total_distance,\n # min_distance_to_target, max_distance_to_target))\n self.move_to_stage_1(target, any_hostiles)\n else:\n # self.log(\"move_to: chose stage 2 (minimum distance from home: {}, total distance: {}, \"\n # \"minimum distance to target: {}, maximum distance to target: {})\"\n # .format(min_distance_from_home, total_distance,\n # min_distance_to_target, max_distance_to_target))\n self.move_to_stage_2(target)", "def _ispinnedmove(self, from_, to_):\n return False", "def move_down(self):\n #if user moves paddle right below on the screen, they won't be able to move it more downwards by using this if statement\n #SCREEN_HEIGHT - 280 = Exact number of pixels where paddle can stop exactly on bottom edge but still has its body fully shown\n if self.center.y > SCREEN_HEIGHT - 280:\n self.center.y -= MOVE_AMOUNT", "def move_me(self):\r\n\t\t#self.start_pos = self.rect.center\t\t\t\r\n\t\tif self.goal_pos is not None:\r\n\t\t\tprint(f'goal_pos: {self.goal_pos}, start_pos: {self.start_pos}')\r\n\t\t\tdx = self.goal_pos[0] - self.start_pos[0]\r\n\t\t\tdy = self.goal_pos[1] - self.start_pos[1]\r\n\r\n\t\t\tdistance = math.sqrt(dx*dx + dy*dy)\r\n\t\t\tself.shift += self.speed\r\n\r\n\t\ttry:\r\n\t\t\tif self.shift/distance < 0.99:\r\n\t\t\t\tself.rect.center = (self.start_pos[0] + self.shift/distance * dx,\r\n\t\t\t\t\t\t\t\t\t self.start_pos[1] + self.shift/distance * dy)\r\n\t\t\t\tprint(f'going to: {self.goal_pos}')\r\n\t\texcept ZeroDivisionError:\r\n\t\t\t\tpass\t\r\n\t\treturn True", "def move_to_position1(self):", "def chase(self, target):\n linear_dist = lambda x1, x2, y1, y2: math.sqrt((x1 
- x2)**2 + \n (y1 - y2)**2)\n min_dist_to_target = linear_dist(self.x, target.x, \n self.y, target.y)\n possible_posn = [[1, 0], [-1, 0], [0, 1], [0, -1]]\n move_to_make = None\n\n for posn in possible_posn:\n if (self.x + posn[0] == self.handler.player.x and \n self.y + posn[1] == self.handler.player.y and \n self.handler.game_state != data.DEAD):\n dmg = self.deal_damage(self.handler.player)\n\n if dmg:\n self.handler.message_box.add_msg(\"{} attacks you for {} damage!\".format(self.name, dmg), \n data.COLOURS['mob_atk_text'])\n else:\n self.handler.message_box.add_msg(\"{} missed!\".format(self.name), \n data.COLOURS['mob_atk_text'])\n\n if self.handler.game_state == data.DEAD:\n self.handler.message_box.add_msg(\"{} killed you!\".format(self.name),\n data.COLOURS['player_die_text'])\n elif not self.handler.world.is_solid(self.x + posn[0], self.y + posn[1]):\n new_dist = linear_dist(self.x + posn[0], target.x,\n self.y + posn[1], target.y)\n if new_dist < min_dist_to_target:\n min_dist_to_target = new_dist\n move_to_make = posn\n\n if move_to_make:\n self.move(move_to_make[0], move_to_make[1])", "def movePlayerTo(self, target):\n if self.player:\n row = 1\n if not self.player.first: # player 1 or 2\n row = -1\n\n if self.player.king:\n if abs(target.row - self.row) == 1 and abs(target.col - self.col) == 1: # move\n target.player = self.player\n self.player = None\n self.diselect()\n target.checkKing()\n return 1\n if abs(target.row - self.row) == 2 and abs(target.col - self.col) == 2: # eat\n mid = getBlockBetween(self, target)\n debugBoard()\n if mid.player and mid.player.first != self.player.first: # can eat\n mid.player = None\n target.player = self.player\n self.player = None\n self.diselect()\n target.checkKing()\n return 2\n pass\n else:\n if target.row == self.row + row and abs(target.col - self.col) == 1: # move\n target.player = self.player\n self.player = None\n self.diselect()\n target.checkKing()\n return 1\n if target.row == self.row + row * 2 and abs(target.col - self.col) == 2: # eat\n mid = getBlockBetween(self, target)\n debugBoard()\n if mid.player and mid.player.first != self.player.first: # can eat\n mid.player = None\n target.player = self.player\n self.player = None\n self.diselect()\n target.checkKing()\n getGame().board.checkWin()\n return 2\n return 0", "def target_position(self, time):\n pass", "def target_position(self, time):\n pass", "def follow(self):\n \n # create list to add with moves\n pos_list = [self.position, self.position, self.position, self.position]\n # list of surrounding indices\n moveset = add_lists(self.moves, pos_list)\n \n # checks if there is bot nearby\n for item in moveset:\n if type(item) is Bot:\n return item\n \n # if no bot found, returns original position\n return self.position", "def move(self, direction, max_height):\n if direction > 0:\n self.y_pos -= self.SPEED\n elif direction < 0:\n self.y_pos += self.SPEED\n\n if self.y_pos >= max_height - 40:\n self.y_pos = max_height - 40", "def move_to_target(self, target_row, target_col, row, col):\r\n move = \"\"\r\n # typical move to move target tile to target pos.\r\n solver_move = \"druld\"\r\n # move up first\r\n move = (target_row - row) * \"u\"\r\n # conditional statements for moving the tile:\r\n # 1. case curr_pos of tile and target_tile are in same col\r\n if (target_col - col) == 0:\r\n move += \"ld\" + ((target_row - row) - 1) * solver_move\r\n else:\r\n # 2. 
curr_pos of tile is on the left of target pos\r\n if (target_col - col) > 0:\r\n move += (target_col - col) * \"l\"\r\n if row == 0:\r\n move += (abs(target_col - col) - 1) * \"drrul\"\r\n else:\r\n move += (abs(target_col - col) - 1) * \"urrdl\"\r\n # 3. curr_pos of tile is on the right of target pos:\r\n elif (target_col - col) < 0:\r\n move += (abs(target_col - col) - 1) * \"r\"\r\n if row == 0:\r\n move += abs(target_col - col) * \"rdllu\"\r\n else:\r\n move += abs(target_col - col) * \"rulld\"\r\n move += (target_row - row) * solver_move\r\n return move", "def move_down(self):\n if self.center.y > (self.height / 2):\n self.center.y -= 5", "def move_inward_outward(self):\r\n\r\n if self.movement == \"inward_outward\" and self.flag_move:\r\n leftPos, topPos, rightPos, bottomPos = self.canvas.coords(self.ball)\r\n if self.size_flag:\r\n self.change_size(\"larger\")\r\n elif not self.size_flag:\r\n self.change_size(\"smaller\")\r\n # If the ball hits a wall, change inward to outward.\r\n if leftPos <= 0 or rightPos >= 400 or topPos <= 0 or bottomPos >= 400:\r\n self.size_flag = 0\r\n # If the ball size reaches 1, change outward to inward.\r\n elif self.size == 1:\r\n self.size_flag = 1\r\n self.canvas.after(50, self.move_inward_outward)", "def move_unit(self, obs, for_subgroup=False):\n target = self.target\n if for_subgroup:\n current_location = self.get_avg_location_of_self_subgroup(obs)\n else:\n current_location = self.get_current_location(obs)\n dist_to_target = 2\n if ((abs(current_location[0] - target[0]) >= dist_to_target) or\n (abs(current_location[1] - target[1]) >= dist_to_target)):\n return {\n \"function\": actions.FUNCTIONS.Move_screen(\"now\", (self.target[0], self.target[1])),\n \"status\": \"MOVING_TO_TARGET\"\n }\n else:\n return {\n \"function\": actions.FUNCTIONS.Move_screen(\"now\", (self.target[0], self.target[1])),\n \"status\": \"ARRIVED_AT_TARGET\"\n }", "def movement(self, screen):\n if self.tx is not None and self.ty is not None: # Target is set\n\n X = self.x - self.tx\n Y = self.y - self.ty\n\n if X < 0: # --->\n self.img = pygame.image.load(next(self.walking_east_images))\n self.x += self.velocity\n elif X > 0: # <----\n self.img = pygame.image.load(next(self.walking_west_images))\n self.x -= self.velocity\n if Y > 0: # up\n self.img = pygame.image.load(next(self.walking_north_images))\n self.y -= self.velocity\n elif Y < 0: # dopwn\n self.img = pygame.image.load(next(self.walking_south_images))\n self.y += self.velocity\n screen.blit(self.img, (self.x, self.y))\n\n if X == 0 and Y == 0:\n self.tx, self.ty = None, None\n self.agent.actionCompleted()", "def time_to_move(self):\r\n if int(self.pix_pos.x+TOP_BOTTOM_BUFFER//2) % self.app.cell_width == 0:\r\n if self.direction == vec(1, 0) or self.direction == vec(-1, 0) or self.direction == vec(0, 0):\r\n return True\r\n # for the x-direction\r\n\r\n if int(self.pix_pos.y+TOP_BOTTOM_BUFFER//2) % self.app.cell_height == 0:\r\n if self.direction == vec(0, 1) or self.direction == vec(0, -1) or self.direction == vec(0, 0):\r\n return True\r\n # for the y-direction\r\n\r\n # checks to see if the player is still within the bounds\r", "def moveDown(self):\n if self._position.y != 14:\n self._position.y +=1\n return True\n return False", "def move_up(self):\n if self.center.y < (self.screen_height - (self.height / 2)):\n self.center.y += 5", "def jump(self):\n \n # move down and see if there's a platform below us.\n # Move down 2 pixels because it doesn't work well if you only move down\n # 1 when working with a platform 
moving down.\n self.rect.y += 2\n platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n self.rect.y -= 2\n \n # If it is ok to jump, set the speed upwards\n if len(platform_hit_list) > 0 or self.rect.bottom >= SCREEN_HEIGHT:\n self.change_y = -10", "def jump(self):\n\n # move down a bit and see if there is a platform below us.\n # Move down 2 pixels because it doesn't work well if we only move down\n # 1 when working with a platform moving down.\n self.rect.y += 2\n platform_hit_list = pygame.sprite.spritecollide(\n self, self.platforms, False)\n self.rect.y -= 2\n\n # If it is ok to jump, set our speed upwards\n if len(platform_hit_list) > 0 or self.rect.bottom >= WIN_HEIGHT:\n self.change_y = -10", "def _move_up(self) -> bool:\n current_agent_node = self._maze.get_player_node()\n\n if current_agent_node.y == 0:\n # Can't go up. Already on the top row\n return False\n else:\n next_node = self._maze.get_node_up(current_agent_node)\n return self._handle_movement(current_agent_node, next_node)", "def moveUp(self):\n if self._position.y != 0:\n self._position.y -=1\n return True\n return False", "def jump(self):\n \n # move down a bit and see if there is a platform below us.\n # Move down 2 pixels because it doesn't work well if we only move down 1\n # when working with a platform moving down.\n self.rect.y += 2\n platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n self.rect.y -= 2\n \n # If it is ok to jump, set our speed upwards\n if len(platform_hit_list) > 0: #or self.rect.bottom >= SCREEN_HEIGHT:\n self.change_y = -10", "def move():\n if randrange(40) == 0:\n y = randrange(-150, 150)\n target = vector(200, y)\n targets.append(target)\n\n for target in targets: # velocidad de los targets\n target.x -= target_speed\n\n if inside(ball):\n speed.y -= 0.35\n ball.move(speed)\n\n dupe = targets.copy()\n targets.clear()\n\n for target in dupe:\n if abs(target - ball) > 13:\n targets.append(target)\n\n for target in targets:\n if not inside(target):\n target.x = 200\n\n draw()\n\n ontimer(move, 50)", "def _check_autos_top(self):\n\t\tscreen_rect = self.screen.get_rect()\n\t\tfor auto in self.autos.sprites():\n\t\t\tif auto.rect.top <= screen_rect.top:\n\t\t\t\t# Treat this the same as if the pigeon got hit.\n\t\t\t\tself._pigeon_hit()\n\t\t\t\tbreak", "def move(self):\n if self.ycor() > 280: self.y_dir = -1 # Set vertical movement to down if ball at top of screen\n if self.xcor() > 380: self.x_dir = -1 # Set horizontal movement to left if ball at right of screen\n if self.xcor() < -380: self.x_dir = 1 # Set horizontal movement to right if ball at left of screen\n new_x = self.xcor() + self.x_dir * 2 # Define 2 spaces forward in set horizontal dir of travel\n new_y = self.ycor() + self.y_dir * 2 # Define 2 spaces forward in set vertical dir of travel\n self.goto(new_x, new_y) # Move ball to newly defined position", "def moveCurrentNodeToTarget(self, checked=False):\n\n c = self.c\n p = c.p\n\n vnodes = [i.v for i in c.getSelectedPositions()]\n\n needs_undo = self.type_ != \"jump\"\n\n if needs_undo:\n bunch = c.undoer.beforeMoveNode(p)\n\n for v in vnodes:\n\n p2 = c.vnode2position(self.target)\n p = c.vnode2position(v)\n\n if not c.positionExists(p2):\n g.error('Target no longer exists: %s' % self.targetHeadString)\n return\n\n if self.type_ in ('clone', 'move'): # all others are always valid?\n if p.v == p2.v or not self.checkMove(p, p2):\n g.error('Invalid move: %s' % (self.targetHeadString))\n return\n if 
p2.isAncestorOf(p): # not for sibling moves\n p2.expand()\n nxt = p.visNext(c) or p.visBack(c)\n nxt = nxt.v\n # store a VNode instead of position as positions are too easily lost\n\n if self.type_ != 'jump':\n p.setDirty() # before move to dirty current parent\n p2.setDirty()\n c.setChanged()\n\n if self.type_ == 'clone':\n p = p.clone()\n\n if self.type_ in ('move', 'clone'):\n if self.which == 'first child':\n p.moveToFirstChildOf(p2)\n elif self.which == 'last child':\n p.moveToLastChildOf(p2)\n elif self.which in ('next sibling', 'prev sibling'):\n if not p2.parent():\n raise NotImplementedError(\"Not implemented for top-level nodes\") #FIXME\n if self.which == 'next sibling':\n p.moveToNthChildOf(p2.parent(), p2._childIndex)\n elif self.which == 'prev sibling':\n p.moveToNthChildOf(p2.parent(), p2._childIndex - 1)\n else:\n raise TypeError(f\"Unknown move type: {self.which!r}\")\n\n elif self.type_ == 'bkmk':\n unl = self.computeUNL(p) # before tree changes\n if self.which == 'first child':\n nd = p2.insertAsNthChild(0)\n elif self.which == 'last child':\n nd = p2.insertAsLastChild()\n elif self.which == 'next sibling':\n nd = p2.insertAfter()\n elif self.which == 'prev sibling':\n nd = p2.insertBefore()\n else:\n raise TypeError(f\"Unknown move type: {self.which!r}\")\n h = p.anyAtFileNodeName() or p.h\n while h and h[0] == '@':\n h = h[1:]\n nd.h = h\n nd.b = unl\n\n elif self.type_ == 'copy':\n\n if self.which == 'first child':\n nd = p2.insertAsNthChild(0)\n quickMove.copy_recursively(p, nd)\n # unlike p.copyTreeFromSelfTo, deepcopys p.v.u\n elif self.which == 'last child':\n nd = p2.insertAsLastChild()\n quickMove.copy_recursively(p, nd)\n elif self.which == 'next sibling':\n nd = p2.insertAfter()\n quickMove.copy_recursively(p, nd)\n elif self.which == 'prev sibling':\n nd = p2.insertBefore()\n quickMove.copy_recursively(p, nd)\n else:\n raise TypeError(f\"Unknown move type: {self.which!r}\")\n\n elif self.type_ in ('linkTo', 'linkFrom'):\n blc = getattr(c, 'backlinkController', None)\n if blc is None:\n g.es(\"Linking requires backlink.py plugin\")\n return\n if self.type_ == 'linkTo':\n blc.vlink(p.v, p2.v)\n else:\n blc.vlink(p2.v, p.v)\n\n if self.type_ in ('bkmk', 'clone', 'copy', 'move'):\n nxt = c.vnode2position(nxt)\n elif self.type_ == 'jump':\n nxt = c.vnode2position(self.target)\n else:\n nxt = None # linkTo / linkFrom don't move\n\n if nxt is not None and c.positionExists(nxt):\n c.selectPosition(nxt)\n\n if needs_undo:\n c.undoer.afterMoveNode(p, 'Quick Move', bunch)\n c.setChanged()\n\n c.redraw()", "def at_target(self):\n return self.location == self.target_location", "def move(self):\n if self.lanzar == True: # Permite el movimiento de la bala, solo ocurre cuando el jugador lo inique\n self.rect.move_ip(0,-self.speed_bullet)\n if self.rect.bottom < 100: # Permite eliminar el objeto cuando este alcanza una altura calculada con respecto a la dim de la pantalla\n self.kill()", "def apply(self, target):\n return target.rect.move(self.state.topleft)", "def move_to_position(self, position):\n if position[0] == self.current[0]:\n y_distance = position[1] - self.current[1]\n if y_distance > 0:\n self.moveSouth()\n else:\n self.moveNorth()\n elif position[1] == self.current[1]:\n x_distance = position[0] - self.current[0]\n if x_distance > 0:\n self.moveEast()\n else:\n self.moveWest()", "def move(self) -> bool:\n pass", "def move(self):\n\n # get the location we WOULD go to\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n while (abs (newX) > 
self.BOX_RANGE) or (abs(newY) > self.BOX_RANGE):\n # print(\"choosing new direction... \",end=\"\")\n self.chooseNewDirection()\n # print(self.dx, self.dy)\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n\n # now move our monster\n super().move()", "def check_ball_on_target():\n\n pass", "def move_vertical(self):\r\n if self.movement == \"vertical\" and self.flag_move:\r\n self.move_ball()\r\n self.canvas.after(50, self.move_vertical)", "def move_to_inspect_pose(self, inspect_target):\n # calculate the arm_lift_link which must be sent\n z_head = inspect_target.z() + self.z_over\n\n # check whether moving the arm is necessary\n if z_head < 1.3:\n rospy.logdebug(\"Entity is low enough. we don't need to move the arm\")\n return True\n\n # saturate the arm lift goal\n z_arm = (z_head - self.z_hh) * self.torso_to_arm_ratio\n z_arm = min(0.69, max(z_arm, 0.0)) # arm_lift_joint limit\n\n arm = self.get_arm(required_goals=['arm_out_of_way'])\n\n # noinspection PyProtectedMember\n pose = arm._arm.default_configurations['arm_out_of_way']\n pose[0] = z_arm\n # noinspection PyProtectedMember\n arm._arm._send_joint_trajectory([pose])\n\n self.base.turn_towards(inspect_target.x(), inspect_target.y(), \"map\", 1.57)\n arm.wait_for_motion_done()\n self.base.wait_for_motion_done()\n return True", "def move_toward(state, location):\n return move_relative(state, location, True)", "def jump(self):\n \n # move down a bit and see if there is a platform below us.\n # Move down 2 pixels because it doesn't work well if we only move down 1\n # when working with a platform moving down.\n self.rect.y += 2\n platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n self.rect.y -= 2\n \n # If it is ok to jump, set our speed upwards\n if len(platform_hit_list) > 0 or self.rect.bottom >= SCR_HEIGHT:\n self.change_y = -8", "def move(self) -> None:\n\n if self.move_up:\n self.__moveUpIfPossible()\n if self.move_down:\n self.__moveDownIfPossible()", "def move_to(self, mobject_or_point):\n layer_center = self.surrounding_rectangle.get_center()\n if isinstance(mobject_or_point, Mobject):\n target_center = mobject_or_point.get_center() \n else:\n target_center = mobject_or_point\n\n self.shift(target_center - layer_center)", "def position_tile(self, target_row, target_col, current_row, current_col):\r\n moves_str = \"\"\r\n # current target is on the upper of 0\r\n if current_col == target_col and current_row < target_row:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # current target is on the left of 0\r\n elif current_row == target_row and current_col < target_col:\r\n lefts = target_col - current_col\r\n for dummy_l in range(lefts):\r\n moves_str += \"l\"\r\n for dummy_cycle in range(lefts - 1):\r\n moves_str += CIRCLES[\"UP_CIRCLE\"]\r\n # current target is on the upperleft of 0\r\n elif current_row < target_row and current_col < target_col:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n lefts = target_col - current_col\r\n for dummy_l in range(lefts):\r\n moves_str += \"l\"\r\n for dummy_cycle in range(lefts - 1):\r\n if current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_CIRCLE\"]\r\n moves_str += \"dru\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # 
current target is on the upperright of 0\r\n elif current_col > target_col and current_row < target_row:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n rights = current_col - target_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n for dummy_cycle in range(rights - 1):\r\n if current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_LEFT_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_LEFT_CIRCLE\"] \r\n moves_str += \"dlu\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # current target is on the right of 0\r\n elif current_col > target_col and current_row == target_row:\r\n rights = current_col - target_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n for dummy_cycle in range(rights - 1):\r\n if current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_LEFT_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_LEFT_CIRCLE\"] \r\n moves_str += \"ulld\"\r\n return moves_str", "def move_to_target():\n keyboard.send('f')", "def faceTowards(self, target):\n current_tile = self.current_tile()\n if(target and current_tile):\n x_dist = target.coordinates()[0] - current_tile.coordinates()[0]\n if x_dist == 0: return\n self.direction_val = x_dist/abs(x_dist)\n #TEMP\n if self.direction_val == -1:\n self.direction_id = 'left'\n if self.direction_val == 1:\n self.direction_id = 'right'", "def jump(self):\r\n if self.grounded == True:\r\n self.vel.y = -13", "async def _async_move(self, target_hass_position):\n current_hass_position = hd_position_to_hass(self._current_cover_position)\n steps_to_move = abs(current_hass_position - target_hass_position)\n if not steps_to_move:\n return\n self._async_schedule_update_for_transition(steps_to_move)\n self._async_update_from_command(\n await self._shade.move(\n {\n ATTR_POSITION1: hass_position_to_hd(target_hass_position),\n ATTR_POSKIND1: 1,\n }\n )\n )\n self._is_opening = False\n self._is_closing = False\n if target_hass_position > current_hass_position:\n self._is_opening = True\n elif target_hass_position < current_hass_position:\n self._is_closing = True\n self.async_write_ha_state()", "def _test_stick_position(self, target):\n\n cue = np.array(self.cue_coords)\n target = np.array(target)\n\n # Get rotation matrix\n delta = target - cue\n l = np.linalg.norm(delta)\n rotation = np.array([[delta[1] / l, -delta[0] / l], [delta[0] / l, delta[1] / l]])\n\n rot_start = rotation.dot(target)\n rot_end = rotation.dot(cue)\n\n for ball in self.other_balls:\n rot_ball = rotation.dot(np.array(ball))\n dist = np.abs(rot_ball[0] - rot_start[0])\n if dist < 2.1 * self.ball_radius:\n return False\n\n return True", "def move(self):\n pass", "def automove(self):\n if self.x < self.end_cinematic_x_pos:\n self.x += self.SHIP_SPEED\n if self.x > self.end_cinematic_x_pos:\n self.x -= self.SHIP_SPEED\n if self.y < self.end_cinematic_y_pos:\n self.y += self.SHIP_SPEED\n if self.y > self.end_cinematic_y_pos:\n self.y -= self.SHIP_SPEED", "def target_mode(self):\r\n if not self.in_target_mode:\r\n # If not in target mode, choose a target\r\n while not inside_polygon(self.target.x,self.target.y, poi):\r\n self.target=PVector(random(-1,1),random(-1,1),0)\r\n self.target.setMag(random_g(20,3))\r\n self.target.add(self.pos)\r\n self.target = random_p([(self.target, 3),(self.coh, 2)])\r\n if self.target.x > field.x:\r\n self.target.x = field.x\r\n elif self.target.x < 0:\r\n self.target.x =0\r\n if self.target.y > field.y:\r\n 
self.target.y = field.y\r\n elif self.target.y < 0:\r\n self.target.y =0\r\n # print(\"new target\", self.target.x, self.target.y)\r\n self.in_target_mode = True\r\n \r\n if PVector.dist(self.pos, self.target) < 3:\r\n self.in_target_mode = False\r\n if self.exiting:\r\n self.status = 3\r\n else:\r\n self.status = random_p([(0,3),(1,6),(2,5)])\r\n else:\r\n self.targeting()", "def step_towards(self, x, y, target_x, target_y):\n path = libtcod.path.new_using_map(self.fov_map)\n libtcod.path.compute(path, x, y, target_x, target_y)\n (t_x, t_y) = libtcod.path.walk(path, False)\n if t_x is None:\n return None, None\n else:\n return t_x - x, t_y - y", "def move_up(self):\n if self.pointer != 0:\n logging.debug(\"moved up\")\n self.pointer -= 1\n self.refresh()\n self.reset_scrolling()\n return True\n else:\n return False", "def did_collide_top_bottom(self):\n\n y_coord = self.getY()\n return y_coord < 0 or (y_coord + self.ball_size[1]) > Configuration.windowHeight", "def move_me_on_spawn(self):\r\n\t\tif self.points_to_go:\r\n\t\t\tself.start_pos = self.points_to_go[0]\r\n\t\t\tfor point in self.points_to_go[1:]:\r\n\t\t\t\tfor i in range(len(self.points_to_go[1:])):\r\n\t\t\t\t\tself.goal_pos = self.points_to_go[i]\r\n\t\t\t\t\t\r\n\t\t\t\t\tself.move_me()\r\n\t\t\t\t\t#self.start_pos = \r\n\t\t\t\t\t#print(self.goal_pos)\r\n\t\t\t\t\t#if self.move_me():\r\n\t\t\t\t\t#\ti += 1\r\n\t\t\t\t\t#\tprint('switch')\r", "def correct_pos(self, target_pos, last_distance):\n tank_pos = Vec2d(self.tank.body.position)\n current_distance = target_pos.get_distance(tank_pos)\n self.last_distance = current_distance\n if last_distance < current_distance:\n return True\n else:\n return False", "def _move_down(self) -> bool:\n current_agent_node = self._maze.get_player_node()\n\n if current_agent_node.y == self._settings.nrows - 1:\n # Can't go down. 
Already on the bottom row.\n return False\n else:\n next_node = self._maze.get_node_down(current_agent_node)\n return self._handle_movement(current_agent_node, next_node)", "def _move_our_paddle(self, action) -> None:\n if not isinstance(action, int):\n action = action.item() # pops the item if the action is a single tensor\n assert action in [a for a in self.action_meanings.keys()], f\"{action} is not a valid action\"\n if action == self.actions['UP']:\n if self.paddle_r.top_bound < self.top_bound:\n self.paddle_r.up()\n elif action == self.actions['DOWN']:\n if self.paddle_r.bottom_bound > self.bottom_bound:\n self.paddle_r.down()", "def autoMove(self) :\n\n\t\tdx = Places.getLoc(self.targetPlace)[0] - self.avatarNP.getX()\n\t\tdy = Places.getLoc(self.targetPlace)[1] - self.avatarNP.getY()\n\t\tdist = math.sqrt(dx*dx + dy*dy)\n\t\th0 = self.avatarNP.getH()\n\t\tif dist < 4 :\n\t\t\t# pick new target and determine deltaH\n\t\t\tnbors = Places.getNeighbors(self.targetPlace)\n\t\t\tx = random.randint(0,len(nbors)-1)\n\t\t\tif nbors[x] == self.oldPlace :\n\t\t\t\tx = (1 if x == 0 else x-1)\n\t\t\tt = nbors[x]\n\t\t\th = self.heading(\n\t\t\t\tself.avatarNP.getX(), self.avatarNP.getY(),\n\t\t\t\tPlaces.getLoc(t)[0], Places.getLoc(t)[1])\n\t\t\tself.deltaH = h - h0\n\t\t\tif self.deltaH > 180 : self.deltaH -= 360\n\t\t\telif self.deltaH < -180 : self.deltaH += 360\n\t\t\tself.deltaH /= 2\n\t\t\tself.oldPlace = self.targetPlace\n\t\t\tself.targetPlace = t\n\t\t\tself.turning = True\n\n\t\t# adjust heading and position\n\t\tt = self.targetPlace\n\t\th = self.heading(self.avatarNP.getX(), self.avatarNP.getY(),\n\t\t\t\t Places.getLoc(t)[0], Places.getLoc(t)[1])\n\t\tdh1 = h - h0\n\t\tif dh1 > 180 : dh1 -= 360\n\t\telif dh1 < -180 : dh1 += 360\n\t\tif self.turning :\n\t\t\tdh2 = self.deltaH * globalClock.getDt()\n\t\t\tif math.fabs(dh1) <= math.fabs(dh2) : \n\t\t\t\tself.turning = False\n\t\t\telse :\n\t\t\t\th = h0 + dh2\n\t\tself.avatarNP.setH(h)\n\t\tself.avatarNP.setFluidY(self.avatarNP,-2 * globalClock.getDt())\n\t\t\n\t\treturn\n\n\t\t\"\"\"\n\t\tif self.rotateDir == -1:\n\t\t\tself.rotateDir = random.randint(1,25) #chances to rotate\n\t\tif self.rotateDuration == -1:\n\t\t\tself.rotateDuration = random.randint(200,400)\n\n\t\t# guide the moving direction of the bot\n\t\tif self.rotateDir <= 3 : # turn left\n\t\t\tself.avatarNP.setH(self.avatarNP.getH() + \\\n\t\t\t\t\t 40 * globalClock.getDt())\n\t\t\tself.rotateDuration -= 1\n\t\t\tif self.rotateDuration <= 0:\n\t\t\t\tself.rotateDuration = -1\n\t\t\t\tself.rotateDir = -1\n\t\telif self.rotateDir <= 6 : # turn right\n\t\t\tself.avatarNP.setH(self.avatarNP.getH() - \\\n\t\t\t\t\t 50 * globalClock.getDt())\n\t\t\tself.rotateDuration -= 1\n\t\t\tif self.rotateDuration <= 0:\n\t\t\t\tself.rotateDuration = -1\n\t\t\t\tself.rotateDir = -1\n\t\telif self.rotateDir == 7 : # turn big left\n\t\t\tself.avatarNP.setH(self.avatarNP.getH() + \\\n\t\t\t\t\t 102 * globalClock.getDt())\n\t\t\tself.rotateDuration -= 1\n\t\t\tif self.rotateDuration <= 0:\n\t\t\t\tself.rotateDuration = -1\n\t\t\t\tself.rotateDir = -1\n\t\telif self.rotateDir == 8 : # turn big right\n\t\t\tself.avatarNP.setH(self.avatarNP.getH() - \\\n\t\t\t\t\t 102 * globalClock.getDt())\n\t\t\tself.rotateDuration -= 1\n\t\t\tif self.rotateDuration <= 0:\n\t\t\t\tself.rotateDuration = -1\n\t\t\t\tself.rotateDir = -1\n\t\telse :\n\t\t\tself.rotateDuration -= 1\n\t\t\tif self.rotateDuration <= 0:\n\t\t\t\tself.rotateDuration = -1\n\t\t\t\tself.rotateDir = 
-1\n\t\t\tself.avatarNP.setFluidPos(self.avatarNP, 0,\n\t\t\t\t\t-1 * globalClock.getDt(),\n\t\t\t\t\tself.avatarNP.getZ() )\n\t\t# moving forward\n\t\t#self.avatarNP.setFluidPos(self.avatarNP, 0,\n\t#\t\t\t\t-1 * globalClock.getDt(),\n\t#\t\t\t\tself.avatarNP.getZ() )\n\t\treturn\n\t\t\"\"\"", "def move_ball(self):\r\n self.canvas.move(self.ball, (self.x_speed * self.speed), (self.y_speed * self.speed))\r\n (leftPos, topPos, rightPos, bottomPos) = self.canvas.coords(self.ball)\r\n if leftPos <= 0 or rightPos >= 400:\r\n self.x_speed = -self.x_speed\r\n if topPos <= 0 or bottomPos >= 400:\r\n self.y_speed = -self.y_speed", "def move_randomly(self, with_fight=False):\n delta = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (0, -1), (1, -1), (1, 0), (1, 1)]\n rd.shuffle(delta)\n x, y = self.owner.pos\n while len(delta) > 0:\n dx, dy = delta.pop()\n if self.move_towards_position((x + dx, y + dy)):\n return", "def move(self, is_forward):\n wh, lh = self.get_heading\n self.w += wh\n self.l += lh\n if self.get_pos() == blocks['wall']:\n self.w -= wh\n self.l -= lh", "def move(self):\n \n self.position = self.explore()", "def move(self, model):\n grid = model.grid\n possible_steps = grid.get_neighborhood(\n self.pos, moore=True, include_center=True)\n choice = random.choice(possible_steps)\n grid.move_agent(self, choice)", "def move_ball(self, from_point, to_point):\n color = self.grid.cells[from_point].ball_color\n self.grid.cells[to_point].place_ball(color)\n self.grid.cells[from_point].button.get_child().destroy()\n self.grid.cells[from_point].is_ball = False\n self.grid.cells[from_point].ball_color = None\n # sprawdzamy czy jest 5 kul w danej orientacji\n self.grid.check_balls()\n # sprawdzamy czy uzytkownik nie zapelnił całej planszy\n self.if_player_lose()\n # losujemy i ustawiamy kolejne kule\n self.grid.place_balls(BALLS_PER_CLICK)\n # sprawdzamy czy jest 5 kul w danej orientacji\n self.grid.check_balls()", "def onMoveUp(self):\n self.mainGrid.moveUp()", "def move_bot(self, direction):\n directions = {\"N\": (0, -1), \"S\": (0, 1), \"E\": (1, 0), \"W\": (-1, 0)}\n x, y = directions[direction]\n\n if self.check_move(x, y):\n # set current pos on map to \".\",\n self.map[self.y][self.x] = \".\"\n # Update self position\n self.x += x\n self.y += y\n return True\n elif self.map[self.y+y][self.x+x] == \".\":\n # Backtracking! 
Remove last solution item\n self.solution.pop()\n # Reset maze spot and move bot\n self.map[self.y][self.x] = \" \"\n self.x += x\n self.y += y\n return True\n else:\n return False", "def move_up(self):\n if self.pointer != 0:\n logging.debug(\"moved up\")\n self.pointer -= 1\n self.refresh()\n return True\n else: \n return False", "def force_move():\n if ZERO_BASE_PLYR_POS in range(0, 10):\n # we cant go up, so go down\n move_player(\"south\")\n else:\n move_player(\"north\")", "def test_object_move(self):\n self.assertTrue(self.obj1 in self.room1.contents)\n # use move_to hook\n self.obj1.move_to(self.room2)\n self.assertFalse(self.obj1 in self.room1.contents)\n self.assertTrue(self.obj1 in self.room2.contents)\n\n # move back via direct setting of .location\n self.obj1.location = self.room1\n self.assertTrue(self.obj1 in self.room1.contents)\n self.assertFalse(self.obj1 in self.room2.contents)", "def move_up(self):\n\n if self.ycor() > 115:\n self.sety(130)\n else:\n new_y = self.ycor() + 40\n self.sety(new_y)", "def move_target(self, distance_adjustment):\n\t\tself.x = float(self.screen_rect.right - self.width)\n\t\tself.x = self.x * distance_adjustment\n\t\tself.rect.x = self.x", "def find_valid_position(self, position: pygame.math.Vector2) -> bool:\n\n window_rect = self.ui_manager.get_root_container().rect\n\n if window_rect.contains(pygame.Rect(int(position[0]), int(position[1]), 1, 1)):\n self.rect.left = int(position.x)\n self.rect.top = int(position.y + self.hover_distance_from_target[1])\n\n if window_rect.contains(self.rect):\n self.relative_rect = self.rect.copy()\n self.text_block.set_position(self.rect.topleft)\n return True\n else:\n if self.rect.bottom > window_rect.bottom:\n self.rect.bottom = int(position.y - self.hover_distance_from_target[1])\n if self.rect.right > window_rect.right:\n self.rect.right = window_rect.right - self.hover_distance_from_target[0]\n if self.rect.left < window_rect.left:\n self.rect.left = window_rect.left + self.hover_distance_from_target[0]\n\n if window_rect.contains(self.rect):\n self.relative_rect = self.rect.copy()\n self.text_block.set_position(self.rect.topleft)\n return True\n else:\n self.relative_rect = self.rect.copy()\n warnings.warn(\"Unable to fit tool tip on screen\")\n return False\n else:\n self.relative_rect = self.rect.copy()\n warnings.warn(\"initial position for tool tip is off screen,\"\n \" unable to find valid position\")\n return False", "def _movePaddle(self):\n self._click()\n self._game.updatePaddle(self._touch)\n self._last = self._touch", "def move(self, pos_to_move):\n if (self.current_pos + 1) - pos_to_move == 1: #Move to the left\n direction = 4\n elif (self.current_pos + 1) - pos_to_move == -1: #Move to the right\n direction = 2\n elif (self.current_pos + 1) - pos_to_move == 5: #Move to the top\n direction = 1\n else: #Move to the bottom\n direction = 3\n return [MOVE, direction]", "def update(self):\n self.rect.y += self.speedy\n ## kill the sprite after it moves over the top border\n if self.rect.top > HEIGHT:\n self.kill()", "def _targeting_mode(self):\n if self._stack:\n pos = self._stack.pop(0)\n hit = grid.shoot(pos)\n shot = hit.cell\n # if we hit a ship\n if shot in SHIPS:\n self._target_ships.add(shot)\n self._stack += self._get_neighbours(pos)\n # if we sunk a ship\n if hit.result == SUNK_SHIP:\n self._target_ships.remove(shot)\n log(\"[TARGET]: Sunk \" + SHIP_NAME[shot] + \" at \" + str(pos))\n if not self._target_ships:\n self._stack = []\n self._mode = HUNTING\n log(\"[TARGET]: All targets 
destroyed, return to hunt.\")\n # if we just hit a ship\n else:\n log(\"[TARGET]: Hit a ship at \" + str(pos))\n elif shot == WATER:\n log(\"[TARGET]: Missed at \" + str(pos))\n # if we already hit the position\n if shot in HITS:\n shot = self.fire()\n else:\n self.shots.add(pos)\n return shot\n # if stack is empty, go back to hunting mode\n else:\n self._mode = HUNTING\n return self.fire()", "def move(self):\n\n if self.rect.right >= SCREEN_WIDTH:\n self.rect.right = SCREEN_WIDTH\n elif self.rect.left <= 0:\n self.rect.left = 0\n #elif self.rect.right < SCREEN_WIDTH and self.rect.left: \n \n self.rect.move_ip(self.speed_p,0)", "def move_down(self):\n\n if self.ycor() < -280:\n self.sety(-300)\n else:\n new_y = self.ycor() - 40\n self.sety(new_y)", "def move(self):\n \n self.position = self.wander()", "def move_to_mouth():\n mouth_pose = get_mouth_pose()\n gripper_pose = Pose()\n gripper_pose.position = mouth_pose.position\n gripper_pose.orientation = mouth_pose.orientation # don't flip since z goes into mouth\n\n actions = []\n actions.append(Action(Action.MOVE, gripper_pose))\n execute_action_sequence(actions)\n return True", "def move(self):\r\n if self.d == 'NORTH' and (self.y + 1) <= table_max_y:\r\n self.y += 1\r\n elif self.d == 'EAST' and (self.x + 1) <= table_max_x:\r\n self.x += 1\r\n elif self.d == 'SOUTH' and (self.y - 1) >= 0:\r\n self.y -= 1\r\n elif self.d == 'WEST' and (self.x - 1) >= 0:\r\n self.x -= 1\r\n else:\r\n print(\"Edge of Table Reached!\")", "def test_actionWithTargetInAdjacentDarkRoom(self):\n self.otherRoom = objects.Thing(store=self.store, name=u'Elsewhere')\n objects.Container.createFor(self.otherRoom, capacity=1000)\n objects.Exit.link(self.location, self.otherRoom, u'west')\n self.player.moveTo(self.otherRoom)\n self.observer.moveTo(self.otherRoom)\n self.assertCommandOutput(\n \"wear pants\",\n [commandutils.E(u\"Who's that?\")],\n [])", "def head_to(self, target: Tuple[float, float], speed: float = 1.5):\n pos = np.array(self.pos)\n target = np.array(target)\n\n heading = np.array(self.model.space.get_heading(pos, target))\n vector = speed * heading / np.linalg.norm(heading)\n self.model.space.move_agent(self, pos + vector)\n return", "def move_to_refine(self, des_img_pos, act_img_pos, current_world_pos, increment, img_thresh):\n des_img_x = des_img_pos[0]\n des_img_y = des_img_pos[1]\n act_img_x = act_img_pos[0]\n act_img_y = act_img_pos[1]\n cur_wld_x = current_world_pos[0]\n cur_wld_y = current_world_pos[1]\n new_wld_x = cur_wld_x\n new_wld_y = cur_wld_y\n \n #object to the left -> move left (-wld_y)\n if (act_img_x < des_img_x-img_thresh):\n print(' Moving left')\n new_wld_y = cur_wld_y + increment\n #object to the right -> move right (+wld_y)\n elif (act_img_x > des_img_x+img_thresh):\n new_wld_y = cur_wld_y - increment\n print(' Moving right')\n #object to the top -> move forward (+wld_x)\n if (act_img_y < des_img_y-img_thresh):\n new_wld_x = cur_wld_x + increment\n print(' Moving forward')\n #object to the bottom -> move backward (-wld_x)\n elif (act_img_y > des_img_y+img_thresh):\n new_wld_x = cur_wld_x - increment\n print(' Moving backward')\n \n #move arm to new coordinates\n self.move_to(new_wld_x, new_wld_y, self.move_to_height)\n \n #return new arm position\n return [new_wld_x, new_wld_y]", "def move_to_stage_2(self, target):\n # type: (RoomPosition) -> None\n ordered_members = self.members_movement_order()\n\n self.log(\"Members {} moving to {} - stage 2.\", _.pluck(ordered_members, 'name'), target)\n\n movement_opts = 
self.new_movement_opts()\n\n for i in range(len(ordered_members) - 1, -1, -1):\n if i == 0:\n if not ordered_members[i].pos.isEqualTo(target):\n if target == self.location:\n ordered_members[i].follow_military_path(self.find_origin(), target, movement_opts)\n else:\n ordered_members[i].move_to(target, movement_opts)\n else:\n next_drone = ordered_members[i - 1]\n this_drone = ordered_members[i]\n if this_drone.pos.isNearTo(next_drone.pos) or movement.is_edge_position(next_drone.pos):\n if this_drone.creep.fatigue and not movement.is_edge_position(next_drone.pos):\n self.log(\"drone {} at {},{} breaking due to fatigue\", i, this_drone.pos.x, this_drone.pos.y)\n break\n direction = movement.diff_as_direction(this_drone.pos, next_drone.pos)\n this_drone.creep.move(direction)\n this_drone.creep.__direction_moved = direction\n elif movement.is_edge_position(this_drone.pos):\n this_drone.move_to(next_drone)\n elif movement.chebyshev_distance_room_pos(this_drone.pos, next_drone.pos) > 3 or (\n movement.chebyshev_distance_room_pos(this_drone.pos, next_drone.pos) > 1\n and not movement.is_edge_position(next_drone.pos)\n ):\n this_drone.move_to(next_drone)\n self.log(\"drone {} at {},{} breaking due to distance\", i, this_drone.pos.x, this_drone.pos.y)\n break\n else:\n # for j in range(len(ordered_members) - 1, i, -1):\n # ordered_members[j].creep.move(\n # movement.diff_as_direction(ordered_members[j], ordered_members[j - 1]))\n moved = False\n\n if movement.chebyshev_distance_room_pos(this_drone.pos, next_drone.pos) == 2:\n # Note: we are guaranteed not to be in an edge position because if we were, the above\n # if would be triggered instead! This allows us to ignore the room name of the next pos.\n next_pos = movement.next_pos_in_direction_to(this_drone.pos, next_drone.pos)\n if movement.is_block_empty(this_drone.room, next_pos.x, next_pos.y):\n other_creeps_there = cast(List[Creep], this_drone.room.look_at(LOOK_CREEPS, next_pos))\n other_drone = _.find(other_creeps_there, 'my')\n if other_drone:\n other_drone.move(movement.diff_as_direction(other_drone.pos, this_drone.pos))\n this_drone.creep.move(movement.diff_as_direction(this_drone.pos, next_drone.pos))\n moved = True\n elif not len(other_creeps_there):\n direction = movement.diff_as_direction(this_drone.pos, next_drone.pos)\n this_drone.creep.move(direction)\n this_drone.creep.__direction_moved = direction\n moved = True\n if not moved:\n this_drone.move_to(next_drone)", "def movement(self):", "def move_rel(self):\n pass" ]
[ "0.7896249", "0.6593043", "0.64728004", "0.6443826", "0.64085305", "0.6375471", "0.6318698", "0.6313725", "0.6297276", "0.6276037", "0.6267289", "0.62628496", "0.62597704", "0.6249121", "0.62366766", "0.62253517", "0.61971354", "0.61686295", "0.6122896", "0.6106136", "0.61015", "0.61015", "0.60994065", "0.6071964", "0.60541373", "0.6040274", "0.6038528", "0.60304904", "0.60272306", "0.60222775", "0.59835684", "0.5983556", "0.5965349", "0.59618324", "0.5939003", "0.5915401", "0.5909744", "0.59080493", "0.5905882", "0.5896162", "0.5885108", "0.5884709", "0.5868469", "0.58643353", "0.58595383", "0.58443344", "0.5841121", "0.5824216", "0.5809164", "0.58087325", "0.5805198", "0.57733536", "0.57710564", "0.57705724", "0.57682323", "0.5759704", "0.57483846", "0.5740123", "0.572183", "0.57209325", "0.5718917", "0.57086384", "0.57060546", "0.5704136", "0.5701549", "0.5680742", "0.56804883", "0.56757885", "0.56589496", "0.5650683", "0.5641897", "0.5636716", "0.56344277", "0.56223685", "0.5618209", "0.5599106", "0.5589397", "0.5580382", "0.5580278", "0.5565509", "0.55581325", "0.5551348", "0.55386454", "0.5527989", "0.5524363", "0.5522692", "0.5517466", "0.5510155", "0.5505927", "0.5503034", "0.55009604", "0.54989886", "0.5496198", "0.548729", "0.5477599", "0.54729384", "0.5469632", "0.5464178", "0.54616773", "0.5460772" ]
0.6234633
15
moves the target further out as a % of the screen
def move_target(self, distance_adjustment): self.x = float(self.screen_rect.right - self.width) self.x = self.x * distance_adjustment self.rect.x = self.x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assign_upLimit():\r\n player.rect.y = 25", "def assign_downLimit():\r\n player.rect.y = 100", "def update_target(self):\n\t\tself.check_top()\n\t\tself.check_bottom()\n\t\tself.update()\n\t\tself.screen.fill(self.target_color, self.rect)", "def move(self):\n if random.random() < 0.5:\n self.y = (self.y + 1) % 100\n else:\n self.y = (self.y - 1) % 100\n if random.random() < 0.5:\n self.x = (self.x + 1) % 100\n else:\n self.x = (self.x - 1) % 100", "def update(self):\n\t\tself.y += (self.settings.target_speed * self.target_direction)\n\t\tself.rect.y = self.y", "def move_finger5(percent):\n percent = _clamp_percent(percent)\n _send_request(f5=percent)", "def move(self):\n self.val = (pygame.mouse.get_pos()[\n 0] - self.xpos - 10) / 80 * (self.maxi - self.mini) + self.mini\n if self.val < self.mini:\n self.val = self.mini\n if self.val > self.maxi:\n self.val = self.maxi", "def _walk(self):\n new_pos = self.rect.move((self.move, 0)) # move 9 pixel to the right per frame\n if self.rect.left < self.area.left or self.rect.right > self.area.right:\n self.move = -self.move # move to the opposite direction when the chimp position exceeds the screen\n new_pos = self.rect.move((self.move, 0))\n self.image = pygame.transform.flip(\n self.image, 1, 0\n ) # mirror the chimp to make it looks like turning around\n self.rect = new_pos", "def head_towards(self):\n dest = self.target_destination - self.location\n if dest.length() != 0:\n dest.scale_to_length(self.speed)\n dest.normalize()\n self.rect.left += dest.x\n self.rect.top += dest.y", "def move_finger4(percent):\n percent = _clamp_percent(percent)\n _send_request(f4=percent)", "def move(self):\r\n min_x = self.__screen.SCREEN_MIN_X\r\n min_y = self.__screen.SCREEN_MIN_Y\r\n delta_x = self.__screen.SCREEN_MAX_X - min_x\r\n delta_y = self.__screen.SCREEN_MAX_Y - min_y\r\n\r\n # new location formula according to pdf.\r\n new_x = (self.__x_speed + self.__x - min_x) % delta_x + min_x\r\n new_y = (self.__y_speed + self.__y - min_y) % delta_y + min_y\r\n self.__x, self.__y = new_x, new_y", "def move_finger2(percent):\n percent = _clamp_percent(percent)\n _send_request(f2=percent)", "def move_to_start(self):\n self.pos = (SCREEN_WIDTH / 2, SCREEN_HEIGHT - 64)", "def move_finger1(percent):\n percent = _clamp_percent(percent)\n _send_request(f1=percent)", "def move_up(self):\n if self.center.y < (self.screen_height - (self.height / 2)):\n self.center.y += 5", "def set_fan_target(self, target_percent):\n self.__fan_target = target_percent\n self.fan_speed_dac.set_output_scaled(1.0 - (target_percent / 100.0))", "def assign_rightLimit():\r\n player.rect.x = WIDTH - 75", "def move(self, max_width):\n if self.x_pos <= 20:\n self.SPEED = abs(self.SPEED)\n elif self.x_pos >= max_width - 40:\n self.SPEED = -abs(self.SPEED)\n self.x_pos += self.SPEED", "def move(self):\n\n if self.rect.right >= SCREEN_WIDTH:\n self.rect.right = SCREEN_WIDTH\n elif self.rect.left <= 0:\n self.rect.left = 0\n #elif self.rect.right < SCREEN_WIDTH and self.rect.left: \n \n self.rect.move_ip(self.speed_p,0)", "def move(self):\n\n if self.range > 0:\n self.dirty = 1\n self.rect.move_ip([self.x * self.speed, self.y * self.speed])\n self.range -= self.speed\n else:\n self.kill()", "def move_finger3(percent):\n percent = _clamp_percent(percent)\n _send_request(f3=percent)", "def update(self):\n \n self.rect.x += self.change_x\n self.rect.y += self.change_y\n \n if self.rect.x < 0:\n self.rect.x = 0\n if self.rect.x > screen_width - 60:\n self.rect.x = screen_width - 60\n if self.rect.y < 
0:\n self.rect.y = 0 \n \n if self.rect.y > screen_height - 60:\n self.rect.y = screen_height - 60", "def moveBasedOnCurrentMomentum(self):\n self.xPos-=self.xMomentum\n self.yPos-=self.yMomentum\n self.syncSpriteCoordinates()", "def jump(self):\n self.vy = -9", "def move_down(self):\r\n if self.rect.bottom < BG_HEIGHT - 60:\r\n self.rect.top += self.speed", "def move_to(self, new_pos, pass_go=True):\r\n new_pos = new_pos % 40\r\n if self.pos > new_pos and pass_go:\r\n self.money += 200\r\n self.pos = new_pos", "def move_up(self):\r\n if self.rect.top > 0:\r\n self.rect.top -= self.speed", "def move_down(self):\n self.y -= 1", "def move_down(self):\n if self.center.y > (self.height / 2):\n self.center.y -= 5", "def move_up(self):\n self.move_measurement(-1)", "def move_car(self):\n a = self.h / 50\n self.x += self.speed_x / FPS\n if self.x + 170 * a >= 1100:\n self.dir = -1\n self.speed_x = -self.speed_x\n if self.x - 170 * a <= 50:\n self.dir = 1\n self.speed_x = -self.speed_x", "def update(self):\n self.rect.y -= self.y_speed # Pawns move up the screen at the speed specified", "def advance(self):\r\n #if see if the UFO is almost at the edge of the screen\r\n if (self.center.x >= SCREEN_WIDTH-20 or self.center.y >= SCREEN_HEIGHT-20):\r\n #if it is change the velocity to negative to reverse direction\r\n self.velocity.dx *= -2\r\n self.velocity.dy *= -2\r\n \r\n # set x equal to x plus dx\r\n self.center.x += self.velocity.dx\r\n # set y equal to y plus dy\r\n self.center.y += self.velocity.dy\r\n #draw the flying object at its new point.\r\n self.draw()", "def move_down(self):\n self.move_measurement(1)", "def move(self):\n \n self.position = self.wander()", "def move(self):\n if self.x_pos < const.screenwidth:\n self.x_pos += 1\n self.x_pos = self.x_pos\n\n self.draw()\n return", "def move_abs(self, y, x):\n max_y, max_x = self.stdscr.getmaxyx()\n delta_y, new_x = divmod(x, max_x)\n new_y = max(y + delta_y, 0)\n self.stdscr.move(new_y, new_x)", "def step(self):\n\n self.ball_x = self.ball_x + self.vel_x\n self.ball_y = self.ball_y + self.vel_y\n if self.ball_y >= 480:\n self.vel_y *= -1\n elif self.ball_y <= 0:\n self.vel_y *= -1\n if self.ball_x >= 640:\n self.vel_x *= -1\n elif self.ball_x <= 0:\n self.vel_x *= -1", "def move(self):\n self.pos += self.direc\n self.nearest_node = self.pixel_to_node()", "def update(self):\n pos = pygame.mouse.get_pos()\n self.rect.midtop = pos\n if self.punching:\n self.rect.move_ip(5, 10)", "def jump(self):\r\n if self.grounded == True:\r\n self.vel.y = -13", "def apply(self, target):\n return target.rect.move(self.state.topleft)", "def jump(self):\n\t\tself.vel = -10\n\t\tself.tick_count = 0\n\t\tself.height = self.y", "def update(self,target):\n print (self.width)\n print (self.height)\n # x = -target.rect.x + int(self.display_w/2)\n # y = -target.rect.y + int(self.display_h/2)\n x = -target.rect.x + int(self.display_w / 2)\n y = -target.rect.y + int(self.display_h / 2)\n self.camera = pygame.Rect(x,y,self.width,self.height)\n\n # x =min(0,x)\n # y=min(0,y)\n # x = max(-(self.width - self.display_w),x)\n # y = max(-(self.height-self.display_h),y)\n # self.camera=pygame.Rect(x,y,self.width,self.height)", "def movement(self):", "def move(self):\n \n self.position = self.explore()", "def update(self):\n pos = pygame.mouse.get_pos()\n self.rect.midtop = pos\n if self.punching:\n self.rect.move_ip(5, 10) # move fist position in place", "def move_to_target():\n keyboard.send('f')", "def move_up(self):\n #if user moves paddle right on top of screen, they 
won't be able to move it more upwards by using this if statement\n #SCREEN_HEIGHT - 20 = Exact number of pixels where paddle can stop exactly on top edge but still has its body fully shown\n if self.center.y < SCREEN_HEIGHT - 20:\n self.center.y += MOVE_AMOUNT", "def move_to_position2(self):", "def update(self):\n pygame.event.pump()\n self.pos_x -= 1.5", "def move_to(self, target):\n # type: (RoomPosition) -> None\n hive = self.home.hive\n home = self.find_home()\n origin = self.find_origin()\n\n total_distance = hive.honey.find_path_length(origin, target, self.new_movement_opts())\n\n min_distance_from_home = Infinity\n min_distance_to_origin = Infinity\n min_distance_to_target = movement.chebyshev_distance_room_pos(self.members_movement_order()[0].pos, target)\n max_distance_to_target = -Infinity\n any_hostiles = False\n for member in self.members:\n distance_to_home = movement.chebyshev_distance_room_pos(member.pos, home)\n distance_to_origin = movement.chebyshev_distance_room_pos(member.pos, origin)\n distance_to_target = movement.chebyshev_distance_room_pos(member.pos, target)\n if distance_to_home < min_distance_from_home:\n min_distance_from_home = distance_to_home\n if distance_to_target > max_distance_to_target:\n max_distance_to_target = distance_to_target\n if distance_to_origin < min_distance_to_origin:\n min_distance_to_origin = distance_to_origin\n if len(member.room.find(FIND_HOSTILE_CREEPS)):\n any_hostiles = True\n\n if min_distance_to_origin > 100:\n mv_order = self.members_movement_order()\n self.set_origin(mv_order[len(mv_order) - 1].pos)\n if min_distance_from_home < 50 and (max_distance_to_target < total_distance / 2):\n self.log(\"move_to: chose stage 0 (minimum distance from home: {}, maximum distance from home: {},\"\n \" total distance: {})\"\n .format(min_distance_from_home, max_distance_to_target, total_distance))\n self.move_to_stage_0(target)\n elif min_distance_to_target < 300 and any_hostiles:\n self.move_to_stage_2(target)\n elif min_distance_to_target > 60 or max_distance_to_target > 200:\n # self.log(\"move_to: chose stage 1 (minimum distance from home: {}, total distance: {}, \"\n # \"minimum distance to target: {}, maximum distance to target: {})\"\n # .format(min_distance_from_home, total_distance,\n # min_distance_to_target, max_distance_to_target))\n self.move_to_stage_1(target, any_hostiles)\n else:\n # self.log(\"move_to: chose stage 2 (minimum distance from home: {}, total distance: {}, \"\n # \"minimum distance to target: {}, maximum distance to target: {})\"\n # .format(min_distance_from_home, total_distance,\n # min_distance_to_target, max_distance_to_target))\n self.move_to_stage_2(target)", "def MoveCurrentSpace(self):\n if self.facing == 0:\n self.y -= 1\n elif self.facing == 1:\n self.x += 1\n elif self.facing == 2:\n self.y += 1\n elif self.facing == 3:\n self.x -= 1", "def moving(self):\n self.animation()\n assert(self.rect.x % 32 == 0 or self.rect.y % 32 == 0), \\\n 'Not centered on tile'", "def move_friendly(self):\n self.friendly_pos[0]+=self.x_speed\n self.friendly_pos[1]+=self.y_speed", "def move_to_position1(self):", "def target_nearest_enemy():\n keyboard.send('ctrl+tab')", "def move(self, is_forward):\n wh, lh = self.get_heading\n self.w += wh\n self.l += lh\n if self.get_pos() == blocks['wall']:\n self.w -= wh\n self.l -= lh", "def faceTowards(self, target):\n current_tile = self.current_tile()\n if(target and current_tile):\n x_dist = target.coordinates()[0] - current_tile.coordinates()[0]\n if x_dist == 0: return\n 
self.direction_val = x_dist/abs(x_dist)\n #TEMP\n if self.direction_val == -1:\n self.direction_id = 'left'\n if self.direction_val == 1:\n self.direction_id = 'right'", "def targeting(self):\r\n self.temp = PVector.sub(self.pos, self.target)\r\n self.temp.normalize()\r\n self.acceleration.sub(self.temp.setMag(1))", "def take_damage(self):\n if self.cur_hp > 0:\n self.cur_hp -= 1\n self.hp_rect.width = (self.cur_hp / self.max_hp) * self.hp_bar_width", "def move(self, target=None):\n visible_tiles = vision.vision(15, self.world_map, self.tile)\n visible_tiles = filter(BeesSprite.pollinated_filter, visible_tiles)\n target_tile = vision.find_target(visible_tiles, self.prey)\n if target_tile:\n move_to_tile = vision.approach(self.tile, target_tile, self.world_map)\n if self.is_movable_terrain(move_to_tile) and \\\n self.not_contains_sprite(move_to_tile, self.prey):\n if move_to_tile == target_tile:\n move_to_tile.contains_sprite.pollinate()\n AnimalSprite.move(self, move_to_tile)\n else:\n AnimalSprite.move(self)\n else:\n AnimalSprite.move(self)", "def move(self):\n pass", "def update(self, *args):\n self.rect.x -= args[0]", "def go_right(self):\n self.rect.centerx += 9", "def home(self, max_dist=150, reset_pos=True): \n while not self.lim_cw:\n self.move_cm(True, max_dist, velocity=1)\n if reset_pos:\n self.step_position = 0\n self.homed = True", "def move(self, p):\r\n self.position.setvalue(p)", "def update(self):\n # Requirement ID: 8.0.3\n\n rn1 = (random.randint(1,101))\n if rn1 < 70 and self.rect.right < self.screen_rect.right - 10:\n self.rect.y += (rn1*1.000/25.000)\n\n rn2 = (random.randint(1,101))\n if rn2 <= 50 and self.rect.right < self.screen_rect.right - 10:\n self.rect.x += (rn2 / 90.0000) + 1\n\n if rn2 > 50 and self.rect.left > self.screen_rect.left + 10:\n self.rect.x -= ((100 - rn2) / 90.0000)", "def move_down(self):\n #if user moves paddle right below on the screen, they won't be able to move it more downwards by using this if statement\n #SCREEN_HEIGHT - 280 = Exact number of pixels where paddle can stop exactly on bottom edge but still has its body fully shown\n if self.center.y > SCREEN_HEIGHT - 280:\n self.center.y -= MOVE_AMOUNT", "def move(self, x, y):\r\n self.rect_thumb.move_ip(x,y)", "def bring_down(self):\n\n self.move(self.__min_step__)", "def jump(self):\n \n # move down and see if there's a platform below us.\n # Move down 2 pixels because it doesn't work well if you only move down\n # 1 when working with a platform moving down.\n self.rect.y += 2\n platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n self.rect.y -= 2\n \n # If it is ok to jump, set the speed upwards\n if len(platform_hit_list) > 0 or self.rect.bottom >= SCREEN_HEIGHT:\n self.change_y = -10", "def advance(self): \n self.center.x = self.center.x + self.velocity.dx\n self.center.y = self.center.y + self.velocity.dy", "def jump(self):\n \n # move down a bit and see if there is a platform below us.\n # Move down 2 pixels because it doesn't work well if we only move down 1\n # when working with a platform moving down.\n self.rect.y += 2\n platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n self.rect.y -= 2\n \n # If it is ok to jump, set our speed upwards\n if len(platform_hit_list) > 0 or self.rect.bottom >= SCR_HEIGHT:\n self.change_y = -8", "def update(self):\r\n self.rect.y += 12\r\n\r\n if self.rect.y > 500:\r\n self.rect.y = random.randrange(-1000, -60)", "def update(self):\r\n if self.right > games.screen.width or 
self.left < 0:\r\n self.dx = -self.dx\r\n \r\n if self.bottom > games.screen.height or self.top < 0:\r\n self.dy = -self.dy", "def move_up(self):\n\n if self.ycor() > 115:\n self.sety(130)\n else:\n new_y = self.ycor() + 40\n self.sety(new_y)", "def retarget(self):\n if self.retargetCount < self.retargetGoal:\n self.retargetCount += 1\n else:\n self.retargetCount = 0\n self.setCurrentTarget()\n self.setMode()", "def up(self):\n self.move(0, 1)", "def move():\n if randrange(40) == 0:\n y = randrange(-150, 150)\n target = vector(200, y)\n targets.append(target)\n\n for target in targets: # velocidad de los targets\n target.x -= target_speed\n\n if inside(ball):\n speed.y -= 0.35\n ball.move(speed)\n\n dupe = targets.copy()\n targets.clear()\n\n for target in dupe:\n if abs(target - ball) > 13:\n targets.append(target)\n\n for target in targets:\n if not inside(target):\n target.x = 200\n\n draw()\n\n ontimer(move, 50)", "def assign_leftLimit():\r\n player.rect.x = 25", "def yview_moveto(self, fraction):\n self.tk.call(self._w, 'yview', 'moveto', fraction)", "def update(self):\n self.x -= self.speed\n self.beam_rect.x = self.x", "def steerright(self):\n self.direction = self.direction-self.steering\n if self.direction < 0:\n self.direction = 360-90\n self.image, self.rect = rot_center(self.image_orig,self.rect,self.direction)", "def jump(self):\n \n # move down a bit and see if there is a platform below us.\n # Move down 2 pixels because it doesn't work well if we only move down 1\n # when working with a platform moving down.\n self.rect.y += 2\n platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n self.rect.y -= 2\n \n # If it is ok to jump, set our speed upwards\n if len(platform_hit_list) > 0: #or self.rect.bottom >= SCREEN_HEIGHT:\n self.change_y = -10", "def moveBasedOnRetreatAction(self, time_passed):\n cpos = self.toScreenCoordinate()\n mpos = pygame.mouse.get_pos()\n toMouse = Vector2.from_points(cpos,mpos)\n toMouse.normalize()\n rheading = -toMouse\n \n heading = self.heading\n angle_between = heading.angle_between(rheading)\n if angle_between>=-30 and angle_between<=30:\n return\n \n distance = time_passed * self.speed\n movement = rheading * distance\n x = movement.get_x()\n y = movement.get_y()\n if not self.checkCollision(x, y) and self.checkValidCoord(x, y):\n self.move(x, y)", "def step(self, move):", "def move(self, center):\n\t\t#print \"made it\"\n\t\tself.rect = self.rect.move(center)", "def update(self):\n super().update()\n if self.center_y > TOP_LIMIT:\n self.center_y = BOTTOM_LIMIT\n if self.center_y < BOTTOM_LIMIT:\n self.center_y = TOP_LIMIT\n\n if self.center_x < 250:\n self.change_x = (0.2) * OBJECTS_SPEED\n elif self.center_x > SCREEN_WIDTH - 250:\n self.change_x = (-0.2) * OBJECTS_SPEED", "def movement(self, screen):\n if self.tx is not None and self.ty is not None: # Target is set\n\n X = self.x - self.tx\n Y = self.y - self.ty\n\n if X < 0: # --->\n self.img = pygame.image.load(next(self.walking_east_images))\n self.x += self.velocity\n elif X > 0: # <----\n self.img = pygame.image.load(next(self.walking_west_images))\n self.x -= self.velocity\n if Y > 0: # up\n self.img = pygame.image.load(next(self.walking_north_images))\n self.y -= self.velocity\n elif Y < 0: # dopwn\n self.img = pygame.image.load(next(self.walking_south_images))\n self.y += self.velocity\n screen.blit(self.img, (self.x, self.y))\n\n if X == 0 and Y == 0:\n self.tx, self.ty = None, None\n self.agent.actionCompleted()", "def move(self):\n self.position += 
self.speed", "def _perform_landing(self):\n self.y += self.settings.mario_jump_speed\n if self.y >= self.settings.mario_y_pos:\n self.y = self.settings.mario_y_pos\n self.jumping = 0\n self.is_currently_jumping = False", "def update(self):\n self.rect.y += self.speedy\n if self.rect.bottom < 0:\n self.kill()", "def update(self):\n self.rect.y += self.speedy\n if self.rect.bottom < 0:\n self.kill()", "def page_up(self):\n self.set_initial_offset(self.initial_offset - self.my_surface.get_height())", "def moveStep(self):\n\t\tif self.pos[0] < self.boundsX[0] or \\\n\t\t\tself.pos[0] > (self.boundsX[1] - self.width):\n\t\t\t\tself.dir[0] *= -1\n\t\tif self.pos[1] < self.boundsY[0] or \\\n\t\t self.pos[1] > (self.boundsY[1] - self.height):\n\t\t\t\tself.dir[1] *= -1\n\t\t\t\n\t\tself.pos[0] += self.dir[0]*self.speed\n\t\tself.pos[1] += self.dir[1]*self.speed", "def move_up(self):\n self.move_step(-1)", "def _move(self, event):\n if self._current_tower.get_value() > self._coins:\n return\n\n #move the shadow tower to mouse position\n position = event.x, event.y\n self._current_tower.position = position\n\n legal, grid_path = self._game.attempt_placement(position)\n\n #find the best path and covert positions to pixel positions\n path = [self._game.grid.cell_to_pixel_centre(position)\n for position in grid_path.get_shortest()]\n\n #Task 1.2 (Tower placement): Draw the tower preview here\n self._view.draw_preview(self._current_tower, legal)\n self._view.draw_path(path)", "def update(self):\n self.pos_x -=1", "def move_unit(self, obs, for_subgroup=False):\n target = self.target\n if for_subgroup:\n current_location = self.get_avg_location_of_self_subgroup(obs)\n else:\n current_location = self.get_current_location(obs)\n dist_to_target = 2\n if ((abs(current_location[0] - target[0]) >= dist_to_target) or\n (abs(current_location[1] - target[1]) >= dist_to_target)):\n return {\n \"function\": actions.FUNCTIONS.Move_screen(\"now\", (self.target[0], self.target[1])),\n \"status\": \"MOVING_TO_TARGET\"\n }\n else:\n return {\n \"function\": actions.FUNCTIONS.Move_screen(\"now\", (self.target[0], self.target[1])),\n \"status\": \"ARRIVED_AT_TARGET\"\n }" ]
[ "0.66254646", "0.65339065", "0.65005153", "0.64133626", "0.6320612", "0.6276562", "0.6270425", "0.62666744", "0.62145644", "0.6178224", "0.6135446", "0.61301184", "0.6129795", "0.61164856", "0.6073802", "0.6068467", "0.6010894", "0.6008223", "0.5989418", "0.5989335", "0.5984468", "0.5976463", "0.5954948", "0.59352046", "0.5934136", "0.59162384", "0.5881952", "0.587772", "0.5869929", "0.5869735", "0.5868177", "0.586092", "0.58291996", "0.5822391", "0.581958", "0.58155143", "0.5793087", "0.5780555", "0.57767403", "0.57735294", "0.577344", "0.5771324", "0.57689315", "0.5767117", "0.5762234", "0.57528394", "0.5738328", "0.57365704", "0.57359505", "0.57336706", "0.57210875", "0.571526", "0.57130647", "0.57082844", "0.5707111", "0.5695773", "0.5690948", "0.56907696", "0.5673541", "0.56717867", "0.56632876", "0.5659264", "0.56555754", "0.5646132", "0.56436783", "0.56421256", "0.5638587", "0.56373996", "0.56346244", "0.5631826", "0.563053", "0.56264997", "0.5624557", "0.56244624", "0.5621649", "0.5619167", "0.56181306", "0.5616854", "0.5611097", "0.5604947", "0.55955887", "0.5593342", "0.559161", "0.55902576", "0.5588707", "0.5583588", "0.5583283", "0.55654603", "0.5562335", "0.5552074", "0.55503285", "0.5548625", "0.5544892", "0.5544892", "0.5539861", "0.55354655", "0.5533438", "0.55318844", "0.5519504", "0.5512383" ]
0.68412036
0
Checks top to target to see if it hit top of screen
def check_top(self): if self.rect.top <=0: self.target_direction = 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_autos_top(self):\n\t\tscreen_rect = self.screen.get_rect()\n\t\tfor auto in self.autos.sprites():\n\t\t\tif auto.rect.top <= screen_rect.top:\n\t\t\t\t# Treat this the same as if the pigeon got hit.\n\t\t\t\tself._pigeon_hit()\n\t\t\t\tbreak", "def hits_top_or_bottom(self):\n if self.y >= self.scene.screen.get_height() - self.image.get_height() or self.y <= 0:\n return True\n else:\n return False", "def isTop(self):\n return self.top", "def did_collide_top_bottom(self):\n\n y_coord = self.getY()\n return y_coord < 0 or (y_coord + self.ball_size[1]) > Configuration.windowHeight", "def top_visible(self) -> bool:\n return self.vertical_scroll == 0", "def check_in_screen(self):\n if self.rect.colliderect(screen_rect) and not self.moving:\n return True\n return False", "def is_target(self):\n\t\treturn self.window and self.window.target is self", "def at_target(self):\n return self.location == self.target_location", "def check_edges(self):\n\t\tbottom_screen_limit = 2 * self.rect.height\n\t\tscreen_rect = self.screen.get_rect()\n\t\tif (self.rect.top <= 100) or (self.rect.bottom >= self.screen_rect.bottom):\n\t\t#self.rect.bottom >= self.screen_rect.bottom:\n\t\t\treturn True", "def is_target(top_container):\n\tif '.' not in top_container.get('barcode', ''):\n\t\treturn True\n\telse:\n\t\treturn False", "def check_ball_on_target():\n\n pass", "def always_top(self) -> bool:\n return bool(self.tk_ref.wm_attributes('-topmost'))", "def update_target(self):\n\t\tself.check_top()\n\t\tself.check_bottom()\n\t\tself.update()\n\t\tself.screen.fill(self.target_color, self.rect)", "def IsTopSnappable(self):\r\n \r\n return self.HasFlag(self.optionTopSnapped)", "def testPsychOnTop(self):\n attr = self.session.create_visit_attr()\n\n self.util.intTypeTest(self, attr, \"on_top\")\n\n self.util.intPropertyTest(self, attr, \"on_top\")", "def HasGripperTop(self):\r\n\r\n return self.HasFlag(self.optionGripperTop)", "def check_edges(self):\n\t\tscreen_rect = self.screen.get_rect()\n\t\tif self.rect.bottom >= screen_rect.bottom or self.rect.top <= -1:\n\t\t\treturn True", "def check_for_top(self) -> bool:\n\t\tboolean_expression_has_top = False\n\t\texpression_has_top = False\n\t\tif self.boolean_expression:\n\t\t\tboolean_expression_has_top = self.boolean_expression.check_for_top()\n\t\tif self.expression:\n\t\t\texpression_has_top = self.expression.check_for_top()\n\t\treturn boolean_expression_has_top or expression_has_top", "def checkBottom(self):\n exposed = True\n for sprite in self.overlapping_sprites:\n if sprite not in self.game.neutrinos:\n a = abs(self.bottom - sprite.top)\n b = abs(self.top - sprite.bottom)\n c = abs(self.left - sprite.right)\n d = abs(self.right - sprite.left)\n if a < b and a < c and a < d:\n exposed = False\n break\n return exposed", "def top(self):\n if self.goals:\n return self.goals[self.stack[-1]]\n else:\n return False", "def sees_home_tag(self):\n detections = self.swarmie.get_latest_targets().detections\n\n for detection in detections:\n if detection.id == 256:\n return True\n\n return False", "def HitTest(self, x, y):\r\n\r\n if self.target.GetScreenRect().Contains((x, y)):\r\n return wx.ALL\r\n\r\n return -1", "def check_off_screen(self):\r\n for bullet in self.bullets:\r\n if bullet.is_off_screen(SCREEN_WIDTH, SCREEN_HEIGHT):\r\n self.bullets.remove(bullet)\r\n\r\n for target in self.targets:\r\n if target.is_off_screen(SCREEN_WIDTH, SCREEN_HEIGHT):\r\n self.targets.remove(target)\r\n # if standard and strong target off the screen, it loses 1 point. 
Otherwise, it remains the score\r\n if not (target.type == \"Bonus\" or target.type == \"Safe\"):\r\n self.score -= 1", "def outOfScreen(self):\n x,y = self.currentLevel.transformToScreenCoordinate(self.position)\n w,h = cblocals.GAME_SCREEN_SIZE\n if x<0 or y<0 or x>x or y>h:\n return True\n return False", "def has_target(self):\n return self.target is not None", "def _test_display_up_button(self):\n return (self.product_displays.top_index > 0)", "async def is_target_reached(self) -> bool: # type: ignore\n ...", "def canStack(bottom, top):\n bw, bh, bd = bottom\n tw, th, td = top\n return (bw < tw) and (bh < th) and (bd < td)", "def need_target(self):\n\t\t# if we search for color targets, we are using cpu and potentially throwing the copter off the real target\n\t\t# can make this decision more complex if needded\n\t\tdelta_t = time() - self.t_last_seen\n\t\ttime_to_see = delta_t > .6 # arbitrary time threshold over which we should probably look for color targets\n\t\t\n\t\treturn time_to_see", "def _isInScreen(self, pos):\n if type(pos) is Vec2:\n return pos.y >= 0 and pos.y <= self.screenSize[1] and pos.x >= 0 \\\n and pos.x <= self.screenSize[0]\n\n return pos[1] >= 0 and pos[1] <= self.screenSize[1] and pos[0] >= 0 \\\n and pos[0] <= self.screenSize[0]", "def offscreen(self, screen):\n if self.x < 0:\n return True\n elif self.y < 0:\n return True\n elif self.x + self.width > screen.get_width():\n return True\n elif self.y + self.height > screen.get_height():\n return True\n return False", "def is_visible(self):\n return self.rect.x < self.screen_rect.width", "def _check_aliens_bottom(self):\n screen_rect = self.screen.get_rect()\n for alien in self.aliens.sprites():\n if alien.rect.bottom >= screen_rect.bottom:\n self._ship_hit()\n break", "def _check_aliens_bottom(self):\n screen_rect = self.screen.get_rect()\n for alien in self.aliens.sprites():\n if alien.rect.bottom >= screen_rect.bottom:\n self._ship_hit()\n break", "def __hit_paddle(self, g_object):\n return g_object == self.__paddle", "def find_valid_position(self, position: pygame.math.Vector2) -> bool:\n\n window_rect = self.ui_manager.get_root_container().rect\n\n if window_rect.contains(pygame.Rect(int(position[0]), int(position[1]), 1, 1)):\n self.rect.left = int(position.x)\n self.rect.top = int(position.y + self.hover_distance_from_target[1])\n\n if window_rect.contains(self.rect):\n self.relative_rect = self.rect.copy()\n self.text_block.set_position(self.rect.topleft)\n return True\n else:\n if self.rect.bottom > window_rect.bottom:\n self.rect.bottom = int(position.y - self.hover_distance_from_target[1])\n if self.rect.right > window_rect.right:\n self.rect.right = window_rect.right - self.hover_distance_from_target[0]\n if self.rect.left < window_rect.left:\n self.rect.left = window_rect.left + self.hover_distance_from_target[0]\n\n if window_rect.contains(self.rect):\n self.relative_rect = self.rect.copy()\n self.text_block.set_position(self.rect.topleft)\n return True\n else:\n self.relative_rect = self.rect.copy()\n warnings.warn(\"Unable to fit tool tip on screen\")\n return False\n else:\n self.relative_rect = self.rect.copy()\n warnings.warn(\"initial position for tool tip is off screen,\"\n \" unable to find valid position\")\n return False", "def _is_screen(grid):\n for e in range(grid.edges.shape[1]):\n if len([j for i in grid.element_edges for j in i if j == e]) < 2:\n return True\n return False", "def _check_aliens_bottom(self):\n screen_rect = self.screen.get_rect()\n for alien in 
self.aliens.sprites():\n if alien.rect.bottom >= screen_rect.bottom:\n # Treat the same as if ship got hit.\n self._ship_hit()\n break", "def isInGoal(self):\n coordx= self.playerPos.x\n coordy= self.playerPos.y\n target = 0 if self.id_team == 1 else 1\n\n if((((target == 0)and (coordx<=5))|\n ((target == 1) and(coordx>145))) \n and (coordy<=50 and coordy>=40)):\n return True\n else:\n return False", "def is_target_in(self, newtarget, buffer_safe_width=0.025):\n from ..utils.shape import HAS_SHAPELY\n # Test if shapely\n if not HAS_SHAPELY:\n print(\"WARNING: could not test if the target is in the image since you do not have SHAPELY\")\n return True\n from ..utils.shape import Point\n\n centroid = self.get_centroid(system=\"xy\")\n radius_pixels = (0.6-buffer_safe_width)* self.units_to_pixels(\"deg\").value\n fov = Point(*centroid).buffer(radius_pixels)\n targetloc = Point(*self.coords_to_pixel(*newtarget.radec))\n return fov.contains(targetloc)", "def game_active():\n im = region_grabber((0, 0, 110, 30))\n pos = imagesearcharea(\"Images/title.jpg\", 0, 0, 0, 0, 0.9, im) # Black background\n return pos != [-1, -1]", "def has_target(self):\n return self._has_target", "def check_edges(self):\r\n screen_rect = self.screen.get_rect()\r\n if self.rect.right >= screen_rect.right:\r\n return True\r\n elif self.rect.left <= 0:\r\n return True", "def _targeting_mode(self):\n if self._stack:\n pos = self._stack.pop(0)\n hit = grid.shoot(pos)\n shot = hit.cell\n # if we hit a ship\n if shot in SHIPS:\n self._target_ships.add(shot)\n self._stack += self._get_neighbours(pos)\n # if we sunk a ship\n if hit.result == SUNK_SHIP:\n self._target_ships.remove(shot)\n log(\"[TARGET]: Sunk \" + SHIP_NAME[shot] + \" at \" + str(pos))\n if not self._target_ships:\n self._stack = []\n self._mode = HUNTING\n log(\"[TARGET]: All targets destroyed, return to hunt.\")\n # if we just hit a ship\n else:\n log(\"[TARGET]: Hit a ship at \" + str(pos))\n elif shot == WATER:\n log(\"[TARGET]: Missed at \" + str(pos))\n # if we already hit the position\n if shot in HITS:\n shot = self.fire()\n else:\n self.shots.add(pos)\n return shot\n # if stack is empty, go back to hunting mode\n else:\n self._mode = HUNTING\n return self.fire()", "def isCurrentPlayerHome(self):\r\n \r\n #creates corresponding starting and ending points for each player\r\n if self.getTurn() == RED:\r\n start = 0\r\n end = 18\r\n else:\r\n start = 6\r\n end = 24\r\n \r\n #checks whether the current player has checkers on corresponding points\r\n for i in range(start, end):\r\n if self.points[i].getTeam() == self.getTurn():\r\n return False\r\n \r\n return True", "def isNearTo(self, point):\n # BBB: I'm using a majored version of the collide rect to fix a problem with a charas-bouncing-effect on movement... 
:-|\n x, y = self.currentLevel.transformToScreenCoordinate(point)\n collide_rect = self.collide_rect\n collide_rect.height+=3\n return collide_rect.collidepoint(x, y)", "def _has_arrived(self, context) -> bool:\n return self._target[0] == context.x and self._target[1] == context.y", "def update(self):\n if self.top > games.screen.height:\n self.destroy()", "def check():\n mouse = pygame.mouse.get_pos()\n\n # mouse[0] = x-coordinate of mouse position.\n # mouse[1] = y-coordinate of mouse position.\n if box1.x + box1.size > mouse[0] > box1.x and box1.y + box1.size > mouse[1] > box1.y:\n return True\n elif box2.x + box2.size > mouse[0] > box2.x and box2.y + box2.size > mouse[1] > box2.y:\n return False\n return None", "def check_edges(self):\n screen_rect = self.screen.get_rect()\n if self.rect.right >= screen_rect.right:\n return True\n elif self.rect.left <= screen_rect.left:\n return True", "def _check_stars_bottom(self):\n screen_rect = self.screen.get_rect()\n for stars in self.stars.sprites():\n if stars.rect.bottom >= screen_rect.bottom:\n self._ship_hit()\n break", "def time_to_move(self):\r\n if int(self.pix_pos.x+TOP_BOTTOM_BUFFER//2) % self.app.cell_width == 0:\r\n if self.direction == vec(1, 0) or self.direction == vec(-1, 0) or self.direction == vec(0, 0):\r\n return True\r\n # for the x-direction\r\n\r\n if int(self.pix_pos.y+TOP_BOTTOM_BUFFER//2) % self.app.cell_height == 0:\r\n if self.direction == vec(0, 1) or self.direction == vec(0, -1) or self.direction == vec(0, 0):\r\n return True\r\n # for the y-direction\r\n\r\n # checks to see if the player is still within the bounds\r", "def check_shot_on_target(self, shot):\n # Defining a few variables to ease the reading\n # Here we define the x and y interval of the goal's segment\n x_min = min(self.s_pos.x, self.e_pos.x)\n x_max = max(self.s_pos.x, self.e_pos.x)\n\n y_min = min(self.s_pos.y, self.e_pos.y)\n y_max = max(self.s_pos.y, self.e_pos.y)\n\n # Shortening variables names\n o_x = shot.opponent.pos.x\n o_y = shot.opponent.pos.y\n\n # If the angle = pi / 2 or - pi / 2, then tan(angle) is undefined\n # In these cases, the shot is vertical, therefore it is valid\n # iff the x coordinate of the opponent is in the goal's x interval\n if abs(shot.angle) == math.pi / 2:\n return self.is_in_interval(x_min, x_max, o_x)\n\n # If the angle = 0, pi or -pi, then tan(angle) is 0 which can lead to \n # undefined intersection points (if the goal is vertical for example)\n # although there is an intersection point\n # \n # In these cases, the shot is horizontal, therefore it is valid\n # iff the y coordinate of the opponent is in the goal's y interval\n if abs(shot.angle) == math.pi or shot.angle == 0:\n return self.is_in_interval(y_min, y_max, o_y)\n\n # Using tan the least amount of time possible, for this is a slow function\n tan_theta = math.tan(shot.angle)\n\n # Define the LE of the shot\n le1 = LinearEquation(tan_theta, o_y - tan_theta * o_x)\n le2 = None\n\n # If the goal is vertical, finding the intersection point\n # is not possible using the normal way\n #\n # That being said, unless the LE of the shot is vertical too (which it \n # isn't as it is checked before hand) there has to be an intersection point\n # This intersection must happen when at the x coodinate of the goal's segment\n # therefore, it is possible to compute the y coordinate of the intersection by\n # computing the application of the shot's LE on this ex coordinate\n #\n # Then, the resulting y is valid iff it is in the goal's segment interval\n if self.e_pos.x - 
self.s_pos.x == 0:\n y = le1.apply(self.e_pos.x)\n return self.is_in_interval(y_min, y_max, y)\n\n # The normal way of solving the intersection of these two LEs\n else:\n\n # Shortening variables by computing the coefficient of the goal's LE\n ratio = (self.e_pos.y - self.s_pos.y) / (self.e_pos.x - self.s_pos.x)\n\n # If the lines are parallels (have the same coefficient) return False\n if math.tan(shot.angle) == ratio:\n return False\n\n # Defining the goal's LE\n le2 = LinearEquation(ratio, self.e_pos.y - self.e_pos.x * ratio)\n\n # Finding the intersection point of the two LEs\n # If there isn't one, return False (but there should be one\n # given all the asserts we do before hand, this is just for completion sake)\n p_intersect = le1.intersection(le2)\n if p_intersect == None:\n return False\n\n # If the intersection point's abscissa is in the goal's x interval, then it is\n # a valid abstracted shot going \n return self.is_in_interval(x_min, x_max, p_intersect.x)", "def is_on_ground(self):\n return bool(self.ground_sprites())", "def check_edges(self):\n screen_rect = self.screen.get_rect()\n if self.rect.right >= screen_rect.right or self.rect.left <= 0:\n return True", "def isHittingLow(self):\n return not self.limLow.get()", "def is_target_buy_list_overrides_screen_loaded(self):\n return self.is_element_visible(self.target_buy_list_overrides_page_header_locator)", "def is_spot_possible(left, right, bottom, top):\n return True\n if right < 6 or bottom < 6:\n # print(\"IMPOSSIBLE\", left, right, top, bottom)\n return False\n if left > 18 or top > 18:\n # print(\"IMPOSSIBLE\", left, right, top, bottom)\n return False\n if abs(top - bottom) > 16 or abs(right - left) > 16:\n # print(\"IMPOSSIBLE\", left, right, top, bottom)\n return False\n return True", "def _test_display_down_button(self):\n return (self.product_displays.top_index + self.limits.screen_products) < len(self.product_displays)", "def IsTarget(self, target_name):\n return target_name in self.GetTargets()", "def check_edges(self):\n\t\tscreen_rect = self.screen.get_rect()\n\t\tif self.rect.right >= screen_rect.right:\n\t\t\treturn True\n\t\telif self.rect.left <= 0:\n\t\t\treturn True", "def is_node_onscreen(self, node, screen_edges):\n real_node = self.original_graph.get_node_by_serial(node.serial)\n node_x = real_node.x\n node_y = real_node.y\n node_r = node.get_radius() * 0.05\n return (node_x + node_r) > screen_edges[\"bottom_left\"].get_x() and \\\n (node_x - node_r) < screen_edges[\"top_right\"].get_x() and \\\n (node_y + node_r) > screen_edges[\"bottom_left\"].get_y() and \\\n (node_y - node_r) < screen_edges[\"top_right\"].get_y()", "def isScrollListInFocus(self):\n try:\n for myGui in self.gui:\n if myGui.__module__ == 'anw.gui.buttonlist':\n if myGui.myScrolledList.isMouseInRegion == True:\n return True\n elif myGui.__module__ == 'anw.gui.systemmenu':\n if myGui.newtradelist.myScrolledList.isMouseInRegion == True:\n return True\n return False\n except:\n return False", "def checkOver(self, owner, messages):\n over = self.turns == 0\n \n if over:\n self.cure(owner)\n messages.append(owner.getHeader() + self.over)\n else:\n self.turns -= 1\n messages.append(owner.getHeader() + self.start)\n return over", "def impact(self, ground):\n return self.position[1] > ground", "def isstart(self) -> bool:\n if len(self._pile) != self._pos + 1:\n return False\n visible_count = 0\n hidden_count = 0\n for c_card in self._pile:\n if c_card.visible:\n visible_count += 1\n else:\n hidden_count += 1\n return hidden_count == self._pos 
and visible_count == 1", "def find_start_stack(blk):\n if blk is None:\n return False\n if find_top_block(blk).name == 'start':\n return True\n else:\n return False", "def inside(self, uv):\n result = self._trimmed.Perform(gp_Pnt2d(uv[0], uv[1]))\n return result == TopAbs_IN", "def check_stack_update_goal(place_check=False, top_idx=-1, depth_img=None):\n current_stack_goal = nonlocal_variables['stack'].current_sequence_progress()\n # no need to reset by default\n needed_to_reset = False\n if place_check:\n # Only reset while placing if the stack decreases in height!\n stack_shift = 1\n elif current_stack_goal is not None:\n # only the place check expects the current goal to be met\n current_stack_goal = current_stack_goal[:-1]\n stack_shift = 0\n # TODO(ahundt) BUG Figure out why a real stack of size 2 or 3 and a push which touches no blocks does not pass the stack_check and ends up a MISMATCH in need of reset. (update: may now be fixed, double check then delete when confirmed)\n if check_row:\n stack_matches_goal, nonlocal_variables['stack_height'] = robot.check_row(current_stack_goal, num_obj=num_obj, check_z_height=check_z_height, valid_depth_heightmap=valid_depth_heightmap, prev_z_height=nonlocal_variables['prev_stack_height'])\n # Note that for rows, a single action can make a row (horizontal stack) go from size 1 to a much larger number like 4.\n if not check_z_height:\n stack_matches_goal = nonlocal_variables['stack_height'] >= len(current_stack_goal)\n elif check_z_height:\n # decrease_threshold = None # None means decrease_threshold will be disabled\n stack_matches_goal, nonlocal_variables['stack_height'], needed_to_reset = robot.check_z_height(depth_img, nonlocal_variables['prev_stack_height'])\n max_workspace_height = ' (see max_workspace_height printout above) '\n # TODO(ahundt) add a separate case for incremental height where continuous heights are converted back to height where 1.0 is the height of a block.\n # stack_matches_goal, nonlocal_variables['stack_height'] = robot.check_incremental_height(input_img, current_stack_goal)\n else:\n stack_matches_goal, nonlocal_variables['stack_height'] = robot.check_stack(current_stack_goal, top_idx=top_idx)\n nonlocal_variables['partial_stack_success'] = stack_matches_goal\n if not check_z_height:\n if nonlocal_variables['stack_height'] == 1:\n # A stack of size 1 does not meet the criteria for a partial stack success\n nonlocal_variables['partial_stack_success'] = False\n nonlocal_variables['stack_success'] = False\n max_workspace_height = len(current_stack_goal) - stack_shift\n # Has that stack gotten shorter than it was before? If so we need to reset\n needed_to_reset = nonlocal_variables['stack_height'] < max_workspace_height or nonlocal_variables['stack_height'] < nonlocal_variables['prev_stack_height']\n\n print('check_stack() stack_height: ' + str(nonlocal_variables['stack_height']) + ' stack matches current goal: ' + str(stack_matches_goal) + ' partial_stack_success: ' +\n str(nonlocal_variables['partial_stack_success']) + ' Does the code think a reset is needed: ' + str(needed_to_reset))\n # if place and needed_to_reset:\n # TODO(ahundt) BUG may reset push/grasp success too aggressively. 
If statement above and below for debugging, remove commented line after debugging complete\n if needed_to_reset or evaluate_random_objects:\n # we are two blocks off the goal, reset the scene.\n mismatch_str = 'main.py check_stack() DETECTED PROGRESS REVERSAL, mismatch between the goal height: ' + str(max_workspace_height) + ' and current workspace stack height: ' + str(nonlocal_variables['stack_height'])\n if not disable_situation_removal:\n mismatch_str += ', RESETTING the objects, goals, and action success to FALSE...'\n print(mismatch_str)\n if not disable_situation_removal:\n # this reset is appropriate for stacking, but not checking rows\n get_and_save_images(robot, workspace_limits, heightmap_resolution, logger, trainer, '1')\n robot.reposition_objects()\n nonlocal_variables['stack'].reset_sequence()\n nonlocal_variables['stack'].next()\n # We needed to reset, so the stack must have been knocked over!\n # all rewards and success checks are False!\n set_nonlocal_success_variables_false()\n nonlocal_variables['trial_complete'] = True\n if check_row:\n # on reset get the current row state\n _, nonlocal_variables['stack_height'] = robot.check_row(current_stack_goal, num_obj=num_obj, check_z_height=check_z_height, valid_depth_heightmap=valid_depth_heightmap)\n nonlocal_variables['prev_stack_height'] = copy.deepcopy(nonlocal_variables['stack_height'])\n return needed_to_reset", "def _checkRoundOver(self):\n\n if not any(player.isAlive() for player in self.teams[0].players):\n self.endGame()", "def can_jump(self) -> bool:\n \n vy = self.player_sprite.change_y\n return vy == 0\n \n self.player_sprite.center_y -= 2\n bottom = self.player_sprite.bottom\n top = self.player_sprite.top\n\n hit_list = arcade.check_for_collision_with_list(self.player_sprite,\n self.platforms)\n self.player_sprite.center_y += 2\n\n for other in hit_list:\n if bottom < other.top and top > other.center_y:\n return True\n else:\n return False", "def checkGameState(self, fpsclock, screen):\n if self.isWin() or self.isLost():\n if self.exitMenu(fpsclock, screen):\n return True\n return False", "def __hit_bricks(self, g_object):\n return type(g_object) == GRect and g_object != self.__paddle", "def IsTopDockable(self):\r\n \r\n return self.HasFlag(self.optionTopDockable)", "def check_pos(self, x, y):\n if x >= WINDOWWIDTH or y >= WINDOWHEIGHT or x <=0 or y <= 0:\n return True", "def is_map_obstacle_in_screen_range(self):\n raise NotImplementedError", "def in_zone(self, bottom_left, top_right):\n return (bottom_left.x <= self.pos.x and self.pos.x <= bottom_left.x and\n top_right.y <= self.pos.y and self.pos.y <= top_right.y)", "def is_gentarget(self, target):\r\n raise NotImplementedError", "def inSmallBlindPosition(self):\n return len(self.in_game) > 0 and ((self.dealer + 1) % len(self.in_game)) == self.position", "def is_at_target_position(self, position, tolerance=0.0):\n x, _ = position\n return x > self.corridor_length - tolerance", "def off_screen(self):\n return self._x < 0", "def check_boundary(self):\n turtle_position = self.turtle.position()\n if turtle_position[0] > self.screen_width/2 - 40 and int(self.turtle.heading()) == 0:\n return False\n if turtle_position[0] < -self.screen_width/2 + 40 and int(self.turtle.heading()) == 180:\n return False\n if turtle_position[1] > self.screen_height/2 - 40 and int(self.turtle.heading()) == 90:\n return False\n if turtle_position[1] < -self.screen_height/2 + 40 and int(self.turtle.heading()) == 270:\n return False\n return True", "def ball_touch(self, ball1, ball2):\r\n 
cdistsq = ((ball2.x-ball1.x)**2 +\r\n (ball2.y-ball1.y)**2)\r\n if cdistsq < (ball1.r+ball2.r)**2:\r\n return True\r\n return False", "def isInlineOfFire(self, shooter, target):\n if self == shooter or self == target or target == shooter:\n raise Exception(\n 'Must provide 3 distinct points to check line of fire'\n )\n\n tmpLeft = (shooter.y - self.y)*(target.x - self.x)\n tmpRight = (target.y - self.y)*(shooter.x - self.x)\n\n if tmpLeft == tmpRight:\n min_x = min(target.x, shooter.x)\n max_x = max(target.x, shooter.x)\n min_y = min(target.y, shooter.y)\n max_y = max(target.y, shooter.y)\n x_condition = min_x <= self.x <= max_x\n y_condition = min_y <= self.y <= max_y\n if x_condition and y_condition:\n return True\n return False", "def see_behind(self):\n return True", "def check_target_position(environment, target_xy, fovea):\n temp_fovea = Fovea(target_xy, fovea.size, [0, 0, 0], fovea.unit)\n temp_image = temp_fovea.get_focus_image(environment)\n return temp_image", "def IsGameOver(self):\n return any(c.cX + c.width >= self.end_location for c in self.enemies)", "def needs_home(self):\r\n return not bool(self.__lib.CC_CanMoveWithoutHomingFirst(self.__serno))", "def scroll_to(self):\n\n if self:\n pass", "def check_edges(self):\n if self.rect.right >= self.screen_rect.right or self.rect.left <= 0:\n return True", "def is_shooting(self):\n if self.gun_interface:\n return self.gun_interface.is_preparing()\n return False", "def always_top(self, value: bool):\n self.tk_ref.wm_attributes('-topmost', int(value))", "def CheckOutOfWindow(window, pt):\r\n\r\n auiWindowMargin = 30\r\n marginRect = wx.Rect(*window.GetClientRect())\r\n marginRect.Inflate(auiWindowMargin, auiWindowMargin)\r\n\r\n return not marginRect.Contains(pt)", "def is_at_goal(self):\n return self._current_loc.get_row() == BoardPath._goal_loc.get_row() and \\\n self._current_loc.get_column() == BoardPath._goal_loc.get_column()", "def IsMouseWellOutsideWindow(self):\r\n \r\n screen_rect = self.GetScreenRect() \r\n screen_rect.Inflate(50, 50)\r\n \r\n return not screen_rect.Contains(wx.GetMousePosition())", "def hit(self, otherball):\r\n dx = (self.unif[0] + self.vx) - (otherball.unif[0] + otherball.vx)\r\n dy = (self.unif[1] + self.vy) - (otherball.unif[1] + otherball.vy)\r\n rd = self.radius + otherball.radius\r\n return dot(dx, dy) < (rd * rd)", "def check_pin_ball_hit(time_elapsed):\n\n pass", "def is_node_in_threat_zone(self, y, x):\n y_condition = self.top_left_y <= y < self.top_left_y + self.height\n x_condition = self.top_left_x <= x < self.top_left_x + self.width\n return y_condition and x_condition", "def isGameOver(self):\n for row in range(0, self.rows):\n for col in range(0, self.cols):\n if self.isMine(row, col) and self.isClicked(row, col):\n return True\n return False", "def _test_stick_position(self, target):\n\n cue = np.array(self.cue_coords)\n target = np.array(target)\n\n # Get rotation matrix\n delta = target - cue\n l = np.linalg.norm(delta)\n rotation = np.array([[delta[1] / l, -delta[0] / l], [delta[0] / l, delta[1] / l]])\n\n rot_start = rotation.dot(target)\n rot_end = rotation.dot(cue)\n\n for ball in self.other_balls:\n rot_ball = rotation.dot(np.array(ball))\n dist = np.abs(rot_ball[0] - rot_start[0])\n if dist < 2.1 * self.ball_radius:\n return False\n\n return True" ]
[ "0.733528", "0.73351", "0.6623284", "0.6466618", "0.6390728", "0.62321746", "0.6225023", "0.62215257", "0.6199803", "0.61606985", "0.6129186", "0.610839", "0.6086093", "0.6061291", "0.6013783", "0.598041", "0.5974214", "0.5962553", "0.58782667", "0.58218175", "0.58168215", "0.5798178", "0.57614124", "0.57439274", "0.5715913", "0.5681475", "0.56807196", "0.56783926", "0.5660252", "0.56162345", "0.55915385", "0.55799586", "0.5576997", "0.5576997", "0.55596423", "0.5556503", "0.5543146", "0.5525516", "0.54511446", "0.5444673", "0.5433197", "0.5426676", "0.54221153", "0.54119545", "0.5400461", "0.53918576", "0.53748447", "0.537045", "0.5342626", "0.53405964", "0.53394514", "0.5328374", "0.5324625", "0.53218997", "0.5310908", "0.53057665", "0.5305196", "0.53041935", "0.5301352", "0.529592", "0.5293065", "0.5291381", "0.5275802", "0.5268745", "0.5263069", "0.52529556", "0.5243942", "0.5238366", "0.5232231", "0.52226967", "0.52122724", "0.52085286", "0.52061725", "0.5204568", "0.5200684", "0.5199228", "0.51940745", "0.51738626", "0.5160852", "0.5155354", "0.5155065", "0.51526535", "0.5152343", "0.5145166", "0.5138759", "0.51380223", "0.51365024", "0.51340944", "0.5133361", "0.5121194", "0.512109", "0.5116775", "0.51162", "0.5111883", "0.5108607", "0.5105401", "0.5104589", "0.510436", "0.51034826", "0.5102688" ]
0.8144298
0
Move the target up and down
def update(self):
    self.y += (self.settings.target_speed * self.target_direction)
    self.rect.y = self.y
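A minimal usage sketch for this row's document, assuming a Pygame-style sprite; the Settings container, the screen-edge bounce, and every attribute outside the two-line update body (screen_rect, image, the direction flip) are illustrative assumptions, not part of the dataset row.

import pygame

class Settings:
    # assumed container; only target_speed is read by update()
    target_speed = 1.5

class Target(pygame.sprite.Sprite):
    def __init__(self, settings, screen_rect):
        super().__init__()
        self.settings = settings
        self.screen_rect = screen_rect
        self.image = pygame.Surface((10, 30))
        self.rect = self.image.get_rect(midright=screen_rect.midright)
        self.y = float(self.rect.y)
        self.target_direction = 1  # 1 moves the target down, -1 moves it up

    def update(self):
        # the same two statements as the document above: move, then sync the rect
        self.y += (self.settings.target_speed * self.target_direction)
        self.rect.y = self.y
        # assumed extra step: reverse direction at the edges so the target oscillates
        if self.rect.top <= 0 or self.rect.bottom >= self.screen_rect.bottom:
            self.target_direction *= -1

Calling Target.update() once per frame from a sprite group would make the target sweep up and down along the screen edge, which is what the query text describes.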
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def up(self):\n self.move(0, 1)", "def move_up(self):\n self.move_step(-1)", "def up(self):\n self.forward(MOVE_DISTANCE)", "def do_up(self, arg):\r\n moveDirection('up')", "def move_up(self):\n self.move_measurement(-1)", "def move_up(self):\r\n if self.rect.top > 0:\r\n self.rect.top -= self.speed", "def movePlayerUp(self):\r\n self.player.moveUp()", "def _move_up(self, exclude=None):\n self._move(self._pos + _Vec3(0, 1, 0))\n self._place(self._pos + _Vec3(0, -1, 0), exclude)", "def move_up(self):\n self.pitch_motor.step_backward()", "def onMoveUp(self):\n self.mainGrid.moveUp()", "def move(self):\n pass", "def move_up(self, distance):\r\n return self.move('up', distance)", "def move_up(self,distance):\n client.moveByVelocityAsync(0, 0, 1, 0.3).join()\n # if self.logging:\n # self.log_arr.append(\"up\")", "def move_up(self):\n\t\treturn self._move(up=True)", "def move(self) -> None:\n\n if self.move_up:\n self.__moveUpIfPossible()\n if self.move_down:\n self.__moveDownIfPossible()", "def move_up():\n return __maze.move_up()", "def move_up(self):\n if self.center.y < (self.screen_height - (self.height / 2)):\n self.center.y += 5", "def move_up(self):\n\n prev_sibling = self.get_previous_sibling()\n if prev_sibling!=None: \n self.move_to(prev_sibling,'left')\n self.save()", "def go_up(self):\n\t\tself.x,self.y = Mario._get_coordinates(Board.prev_i,Board.prev_j,Board.prev_k)\n\t\tMario._pass(self.x,self.y)\n\t\ttemp = self.x\n\t\tflag = 0\n\t\twhile(temp>=self.x-8):\n\t\t\tif(Board.board[temp][self.y] in obstacles):\n\t\t\t\tflag = 1\n\t\t\t\ttemp_x = temp+1\n\t\t\t\tbreak\n\t\t\ttemp = temp-1\n\n\t\tif(not flag):\n\t\t\ttemp_x = self.x-8\n\n\t\tif Board.board[temp_x-1][self.y]=='B':\n\t\t\tnew = self.y\n\t\t\tfor i in range(new-4,new+5):\n\t\t\t\tif Board.board[temp_x-1][i]=='B':\n\t\t\t\t\tBoard.board[temp_x-1][i]='T'\n\t\t\tMario.bonus+=50\n\t\t\tif self.y==229 or self.y ==230 or self.y==231:\n\t\t\t\tBoard.board[23][230]='P'\n\n\n\t\tBoard.board[temp_x][self.y] = 'M'\t\t\n\t\tos.system('clear')\n\t\tinit_board(Board.prev_i,Board.prev_j,Board.prev_k)", "def move_down(self):\n self.move_measurement(1)", "def move_down(self):\n self.move_step(1)", "def move_up(self):\n #if user moves paddle right on top of screen, they won't be able to move it more upwards by using this if statement\n #SCREEN_HEIGHT - 20 = Exact number of pixels where paddle can stop exactly on top edge but still has its body fully shown\n if self.center.y < SCREEN_HEIGHT - 20:\n self.center.y += MOVE_AMOUNT", "def down(self):\n self.move(0,-1)", "def move_up(self):\n if self.pointer != 0:\n logging.debug(\"moved up\")\n self.pointer -= 1\n self.refresh()\n return True\n else: \n return False", "def move_up(self, dist):\r\n self.send_command_without_response(f'up {dist}')", "def move_up(self):\n return self._move(up=True)", "def bring_down(self):\n\n self.move(self.__min_step__)", "def move_down(self):\n self.y -= 1", "def move_down(self):\r\n if self.rect.bottom < BG_HEIGHT - 60:\r\n self.rect.top += self.speed", "def up(self):\n if self.head.heading() != DOWN and self.last_direction != DOWN:\n self.head.setheading(UP)", "def move(self):\n raise NotImplementedError", "def move_to_position2(self):", "def move_up(self):\r\n self.image = pygame.transform.scale(pygame.image.load_extended(\"images/playerImage_up.png\").convert_alpha(),\r\n (50, 50))\r\n self.image.set_colorkey((255, 255, 255))\r\n self.rect.y -= self.speed\r\n self.lastMove = 'up'", "def open_up(self):\n\n self.move(self.__max_step__)", "def up_down(self, 
up):\n if up == 'u':\n up = 1\n elif up == 'n':\n up = 0\n elif up == 'd':\n up = -1\n else:\n raise ValueError(\"The heck you doing Servo?? u d or n ONLY\")\n self.h += up\n if self.get_pos() == blocks['wall']:\n self.h -= up", "def move_up(self):\n\n # slowly drive backwards\n self.velocity = const.Driving.MAX_VELOCITY\n self.angle = const.Driving.NEUTRAL_STEERING_ANGLE\n\n # drive as long there is enough space to the next vehicle or obstacle\n gap = self.formation.calc_gap()\n self.start_driving()\n while self.sensor_manager.front > gap: continue\n\n self.stop_driving()", "def move_backward():\n pass", "def move_up(self):\n if self.pointer != 0:\n logging.debug(\"moved up\")\n self.pointer -= 1\n self.refresh()\n self.reset_scrolling()\n return True\n else:\n return False", "def move_rel(self):\n pass", "def move_up(self):\n\n if self.ycor() > 115:\n self.sety(130)\n else:\n new_y = self.ycor() + 40\n self.sety(new_y)", "def move(self):\n \n self.position = self.explore()", "def move_to(self, target):\n self.map.breadth_first_search(self.position, target)\n path = self.map.get_path(target, self.position)\n for node in path[1:]:\n mask = (\n node.x - self.position.x,\n node.y - self.position.y\n )\n direction = self.MASKS[mask]\n self.move(direction)", "def head_towards(self):\n dest = self.target_destination - self.location\n if dest.length() != 0:\n dest.scale_to_length(self.speed)\n dest.normalize()\n self.rect.left += dest.x\n self.rect.top += dest.y", "def movePlayerDown(self):\r\n self.player.moveDown()", "def move_to_position1(self):", "def move_down(self):\n if self.center.y > (self.height / 2):\n self.center.y -= 5", "def move(self, direction):\n # replace with your code\n pass", "def move(self, direction):\n # replace with your code\n pass", "def _move(self, direction, difference):\n future_tile_number = self.get_number() + difference\n if future_tile_number in range(1, Tile.total_tiles + 1):\n future_tile = Tile.get_tile(future_tile_number)\n if future_tile.walkable:\n self.set_target(future_tile)\n self.rotate(direction)", "def _move_up(self, position):\n if position != self._data.first(): # consider moving\n cnt = position.element()._count\n walk = self._data.before(position)\n if cnt > walk.element()._count: # must shift forward\n while (walk != self._data.first() and cnt > self._data.before(walk).element()._count):\n walk = self._data.before(walk)\n self._data.add_before(walk, self._data.delete(position)) # delete / reinsert", "def move(self):\n self.pos += self.direc\n self.nearest_node = self.pixel_to_node()", "def moveUp(self):\n if self._position.y != 0:\n self._position.y -=1\n return True\n return False", "def move_forward():\n pass", "def move_to_target(self, target_row, target_col, row, col):\r\n move = \"\"\r\n # typical move to move target tile to target pos.\r\n solver_move = \"druld\"\r\n # move up first\r\n move = (target_row - row) * \"u\"\r\n # conditional statements for moving the tile:\r\n # 1. case curr_pos of tile and target_tile are in same col\r\n if (target_col - col) == 0:\r\n move += \"ld\" + ((target_row - row) - 1) * solver_move\r\n else:\r\n # 2. curr_pos of tile is on the left of target pos\r\n if (target_col - col) > 0:\r\n move += (target_col - col) * \"l\"\r\n if row == 0:\r\n move += (abs(target_col - col) - 1) * \"drrul\"\r\n else:\r\n move += (abs(target_col - col) - 1) * \"urrdl\"\r\n # 3. 
curr_pos of tile is on the right of target pos:\r\n elif (target_col - col) < 0:\r\n move += (abs(target_col - col) - 1) * \"r\"\r\n if row == 0:\r\n move += abs(target_col - col) * \"rdllu\"\r\n else:\r\n move += abs(target_col - col) * \"rulld\"\r\n move += (target_row - row) * solver_move\r\n return move", "def move_up ( self ):\n list, index = self.get_info()\n self.value = (list[:index-1] + [ list[index], list[index-1] ] + \n list[index+1:])", "def move(self):\n \n self.position = self.wander()", "def swipe_up(self):\n self.driver.swipe(start_x=self.x_cord, start_y=self.start_y,\n end_x=self.x_cord, end_y=self.end_y, duration=1000)", "def move_up(self, step: int = 1) -> None:\n if self.cursor_pos.x == 0:\n self.cursor_pos = Point(self.height - step, self.cursor_pos.y)\n else:\n self.cursor_pos = Point(self.cursor_pos.x-step, self.cursor_pos.y)", "def moveDown():\n tt.right(90)\n tt.forward(60)\n tt.right(90)\n tt.forward(250)\n tt.right(180)", "def autoMove(self) :\n\n\t\tdx = Places.getLoc(self.targetPlace)[0] - self.avatarNP.getX()\n\t\tdy = Places.getLoc(self.targetPlace)[1] - self.avatarNP.getY()\n\t\tdist = math.sqrt(dx*dx + dy*dy)\n\t\th0 = self.avatarNP.getH()\n\t\tif dist < 4 :\n\t\t\t# pick new target and determine deltaH\n\t\t\tnbors = Places.getNeighbors(self.targetPlace)\n\t\t\tx = random.randint(0,len(nbors)-1)\n\t\t\tif nbors[x] == self.oldPlace :\n\t\t\t\tx = (1 if x == 0 else x-1)\n\t\t\tt = nbors[x]\n\t\t\th = self.heading(\n\t\t\t\tself.avatarNP.getX(), self.avatarNP.getY(),\n\t\t\t\tPlaces.getLoc(t)[0], Places.getLoc(t)[1])\n\t\t\tself.deltaH = h - h0\n\t\t\tif self.deltaH > 180 : self.deltaH -= 360\n\t\t\telif self.deltaH < -180 : self.deltaH += 360\n\t\t\tself.deltaH /= 2\n\t\t\tself.oldPlace = self.targetPlace\n\t\t\tself.targetPlace = t\n\t\t\tself.turning = True\n\n\t\t# adjust heading and position\n\t\tt = self.targetPlace\n\t\th = self.heading(self.avatarNP.getX(), self.avatarNP.getY(),\n\t\t\t\t Places.getLoc(t)[0], Places.getLoc(t)[1])\n\t\tdh1 = h - h0\n\t\tif dh1 > 180 : dh1 -= 360\n\t\telif dh1 < -180 : dh1 += 360\n\t\tif self.turning :\n\t\t\tdh2 = self.deltaH * globalClock.getDt()\n\t\t\tif math.fabs(dh1) <= math.fabs(dh2) : \n\t\t\t\tself.turning = False\n\t\t\telse :\n\t\t\t\th = h0 + dh2\n\t\tself.avatarNP.setH(h)\n\t\tself.avatarNP.setFluidY(self.avatarNP,-2 * globalClock.getDt())\n\t\t\n\t\treturn\n\n\t\t\"\"\"\n\t\tif self.rotateDir == -1:\n\t\t\tself.rotateDir = random.randint(1,25) #chances to rotate\n\t\tif self.rotateDuration == -1:\n\t\t\tself.rotateDuration = random.randint(200,400)\n\n\t\t# guide the moving direction of the bot\n\t\tif self.rotateDir <= 3 : # turn left\n\t\t\tself.avatarNP.setH(self.avatarNP.getH() + \\\n\t\t\t\t\t 40 * globalClock.getDt())\n\t\t\tself.rotateDuration -= 1\n\t\t\tif self.rotateDuration <= 0:\n\t\t\t\tself.rotateDuration = -1\n\t\t\t\tself.rotateDir = -1\n\t\telif self.rotateDir <= 6 : # turn right\n\t\t\tself.avatarNP.setH(self.avatarNP.getH() - \\\n\t\t\t\t\t 50 * globalClock.getDt())\n\t\t\tself.rotateDuration -= 1\n\t\t\tif self.rotateDuration <= 0:\n\t\t\t\tself.rotateDuration = -1\n\t\t\t\tself.rotateDir = -1\n\t\telif self.rotateDir == 7 : # turn big left\n\t\t\tself.avatarNP.setH(self.avatarNP.getH() + \\\n\t\t\t\t\t 102 * globalClock.getDt())\n\t\t\tself.rotateDuration -= 1\n\t\t\tif self.rotateDuration <= 0:\n\t\t\t\tself.rotateDuration = -1\n\t\t\t\tself.rotateDir = -1\n\t\telif self.rotateDir == 8 : # turn big right\n\t\t\tself.avatarNP.setH(self.avatarNP.getH() - \\\n\t\t\t\t\t 102 * 
globalClock.getDt())\n\t\t\tself.rotateDuration -= 1\n\t\t\tif self.rotateDuration <= 0:\n\t\t\t\tself.rotateDuration = -1\n\t\t\t\tself.rotateDir = -1\n\t\telse :\n\t\t\tself.rotateDuration -= 1\n\t\t\tif self.rotateDuration <= 0:\n\t\t\t\tself.rotateDuration = -1\n\t\t\t\tself.rotateDir = -1\n\t\t\tself.avatarNP.setFluidPos(self.avatarNP, 0,\n\t\t\t\t\t-1 * globalClock.getDt(),\n\t\t\t\t\tself.avatarNP.getZ() )\n\t\t# moving forward\n\t\t#self.avatarNP.setFluidPos(self.avatarNP, 0,\n\t#\t\t\t\t-1 * globalClock.getDt(),\n\t#\t\t\t\tself.avatarNP.getZ() )\n\t\treturn\n\t\t\"\"\"", "def move_target(self, distance_adjustment):\n\t\tself.x = float(self.screen_rect.right - self.width)\n\t\tself.x = self.x * distance_adjustment\n\t\tself.rect.x = self.x", "def move(self, direction):\n pass", "def move_up(self, lifting, **kwargs):\n self.log.debug(\"Moving table up by {!s} microns\".format(lifting))\n if not self.variables[\"Table_state\"]:\n success = self.move_to([0, 0, lifting], False, 0, True, **kwargs)\n if success:\n self.variables[\"Table_state\"] = True # true means up\n return success\n else:\n self.queue.put({\"Info\": \"Table already in the up position...\"})\n return True", "def movement(self):", "def move_down():\n return __maze.move_down()", "def walk_up(self, sound): \n \n # Checks if tile below Player is free, and if they are not in an animation cycle\n if (self.__maze_arrangement[self.__user_x][self.__user_y - 1] != 1) and not self.__animating:\n \n # Sets Player direction to up, animating state to true, moves the Player upwards\n # by one tile, and plays the walking sound effect \n self.__direction = \"UP\"\n self.__animating = True\n self.__user_y -= 1\n sound.play()", "def _movePaddle(self):\n self._click()\n self._game.updatePaddle(self._touch)\n self._last = self._touch", "def do_down(self, arg):\r\n moveDirection('down')", "def movePlayerTo(self, target):\n if self.player:\n row = 1\n if not self.player.first: # player 1 or 2\n row = -1\n\n if self.player.king:\n if abs(target.row - self.row) == 1 and abs(target.col - self.col) == 1: # move\n target.player = self.player\n self.player = None\n self.diselect()\n target.checkKing()\n return 1\n if abs(target.row - self.row) == 2 and abs(target.col - self.col) == 2: # eat\n mid = getBlockBetween(self, target)\n debugBoard()\n if mid.player and mid.player.first != self.player.first: # can eat\n mid.player = None\n target.player = self.player\n self.player = None\n self.diselect()\n target.checkKing()\n return 2\n pass\n else:\n if target.row == self.row + row and abs(target.col - self.col) == 1: # move\n target.player = self.player\n self.player = None\n self.diselect()\n target.checkKing()\n return 1\n if target.row == self.row + row * 2 and abs(target.col - self.col) == 2: # eat\n mid = getBlockBetween(self, target)\n debugBoard()\n if mid.player and mid.player.first != self.player.first: # can eat\n mid.player = None\n target.player = self.player\n self.player = None\n self.diselect()\n target.checkKing()\n getGame().board.checkWin()\n return 2\n return 0", "def move_down(self):\n client.moveByVelocityAsync(0, 0, -1, 0.3).join()\n # if self.logging:\n # self.log_arr.append(\"down\")", "def step(self, move):", "def _move_up(self, p):\n if p != self.data.first():\n self.data.add_first(self.data.delete(p))", "def move_up(self, request):\n return self._move(True, request)", "def rightUp(self):", "def up(self, x, y, z):\n self.curr_up = Vector([x, y, z])\n self.ptr.up(x, y, z)", "def _move_up(self, p):\n if p != 
self._data.first():\n self._data.add_first(self._data.delete(p)) # remove or delete it from initial place and reinsert in new position", "def move_down(self):\n\t\treturn self._move(up=False)", "def _move_up(self, position):\n if position != self._data.first():\n self._data.add_first(self._data.delete(position))", "def move(x,y):\r\n pass", "def move(self, dt):\n dt = dt", "def move_up(self):\n kya = self.board.board[self.player.y-1][self.player.x]\n if self.player.y > 0 and kya != 'X' and kya != 'G':\n self.board.board[self.player.y][self.player.x] = '.'\n self.coin_taken(0, -1)\n self.board.board[self.player.y-1][self.player.x] = 'P'\n self.player.y -= 1\n else:\n print \"Can't move up\"\n self.dont_move_ghosts = 1", "def player_up(self) -> None:\n self._routes[self._current_route_key][\"UP\"] += 1\n new_pos = self._player.y - self.MOVE_INC\n if new_pos + self.PLAYER_DIM <= self._height and new_pos - self.PLAYER_DIM >= 0:\n self._player.y = new_pos", "def move(self,x,y):\n assert (type(x) in [int, float]), \"parameter x:%s is not a valid number\" % `x`\n assert (type(y) in [int, float]), \"parameter y:%s is not a valid number\" % `y`\n d = self._turtle.isdown()\n if d:\n self._turtle.penup()\n self._turtle.setposition(x,y)\n if d:\n self._turtle.pendown()", "def moveBy(self, x, y):\n\t\tself.moveTo(self.x + x, self.y + y)", "def move_to(self, x, y):\n self.x = x\n self.y = y", "def move_to(self, x, y):\n self.x = x\n self.y = y", "def move_down(self, distance):\r\n return self.move('down', distance)", "def up(self):\n if self.top == self.current:\n return\n else:\n self.current += 1", "def leftUp(self):", "def up(self):\n if self.selected_offset > 0:\n self.selected_offset -= 1\n if self.selected_offset < self.top_offset:\n self.top_offset -= 1\n self.__update_display()\n self.__update_selection()", "def moveDown(self,event):\n oldCoords=[self.xPos,self.yPos]\n \n self.yPos= self.yPos+1 #modify the coordiantes\n \n deltaCoords=[self.xPos-oldCoords[0],self.yPos-oldCoords[1]]\n self.canvasIGetDrawnOn.move(self.sprite,*deltaCoords)", "def move_towards(self, target_x, target_y, game_map, entities):\n path = game_map.compute_path(self.x, self.y, target_x, target_y)\n\n dx = path[0][0] - self.x\n dy = path[0][1] - self.y\n\n if game_map.walkable[path[1][0], path[1][1]] and \\\n not get_blocking_entities_at_location(entities, self.x + dx, self.y + dy):\n self.move(dx, dy)", "def moveturtle(x,y,t):\n t.penup()\n t.goto(x,y)\n t.pendown()", "def move(self, direction, cycles):\n\t\tpass", "def move_lift_up():\n return _move_lift(1)", "def move(self, agent, action):\n\t\tpass", "def move(self, target=None):\n visible_tiles = vision.vision(15, self.world_map, self.tile)\n visible_tiles = filter(BeesSprite.pollinated_filter, visible_tiles)\n target_tile = vision.find_target(visible_tiles, self.prey)\n if target_tile:\n move_to_tile = vision.approach(self.tile, target_tile, self.world_map)\n if self.is_movable_terrain(move_to_tile) and \\\n self.not_contains_sprite(move_to_tile, self.prey):\n if move_to_tile == target_tile:\n move_to_tile.contains_sprite.pollinate()\n AnimalSprite.move(self, move_to_tile)\n else:\n AnimalSprite.move(self)\n else:\n AnimalSprite.move(self)", "def swipe_up(self):\n self.swipe_sub(SWIPE_MATRIX[0])", "def move_down(self):\n self.pitch_motor.step_forward()", "def move_to(self, x, y):\n self._impl.move_to(x, y)" ]
[ "0.7641986", "0.75890297", "0.75058055", "0.7483884", "0.7452509", "0.7446572", "0.73249847", "0.7263192", "0.71093076", "0.7087285", "0.70684254", "0.7036653", "0.7012248", "0.6975011", "0.69614774", "0.6866236", "0.6853183", "0.6853097", "0.68411994", "0.68393564", "0.68263525", "0.6805023", "0.6771697", "0.6764083", "0.67595124", "0.6729754", "0.67107487", "0.67009723", "0.6676979", "0.667064", "0.665755", "0.6655251", "0.6649442", "0.6635043", "0.66232073", "0.66166186", "0.6609544", "0.65914375", "0.6584212", "0.6574294", "0.65635264", "0.65372366", "0.65338665", "0.6504874", "0.64953476", "0.64944214", "0.64912707", "0.64912707", "0.6485298", "0.64762676", "0.6471199", "0.646627", "0.6457436", "0.64537656", "0.6450984", "0.6437834", "0.6415154", "0.6412887", "0.6410072", "0.6399943", "0.6395167", "0.63925886", "0.6377073", "0.6365725", "0.6352395", "0.63496864", "0.63360405", "0.6324488", "0.63206035", "0.63204765", "0.6319889", "0.6312101", "0.630895", "0.6304544", "0.62995285", "0.6293351", "0.62895125", "0.62862325", "0.6284335", "0.6281812", "0.6274272", "0.62687206", "0.6254449", "0.6253228", "0.62394077", "0.62394077", "0.62307847", "0.62272143", "0.6225048", "0.6223481", "0.62205225", "0.6217237", "0.6214733", "0.62118345", "0.6204592", "0.62008274", "0.619904", "0.6196673", "0.61904705", "0.61772007" ]
0.6171909
100
Start random direction movement
def go(self):
    # if we want to go to the right, we need to decrease x and increase y
    # if we want to go to the left, we need to increase x and decrease y
    h = random.randrange(2, 4)
    v = random.randrange(1, 3)
    if not bool(random.getrandbits(1)):
        h = - h
    self.velocity = [h, -v]
    self.explode.play()
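A hedged, self-contained sketch of the random launch shown in this row's document; the Ball wrapper class and the _SoundStub stand-in for the explode sound are assumptions added only so the snippet runs on its own.

import random

class _SoundStub:
    def play(self):
        pass  # stand-in for the sound object used by the original code

class Ball:
    def __init__(self):
        self.velocity = [0, 0]
        self.explode = _SoundStub()

    def go(self):
        # horizontal speed 2-3, vertical speed 1-2; a coin flip decides whether
        # the horizontal component is negated, so the launch direction is random
        # while the vertical component always points upward (-v)
        h = random.randrange(2, 4)
        v = random.randrange(1, 3)
        if not bool(random.getrandbits(1)):
            h = -h
        self.velocity = [h, -v]
        self.explode.play()

if __name__ == "__main__":
    ball = Ball()
    ball.go()
    print(ball.velocity)  # e.g. [2, -1] or [-3, -2]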
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setRandDirection(self):\n phi = 2*math.pi*random.random()\n u = 2*random.random() - 1\n v = math.sqrt(1-u*u)*math.cos(phi)\n w = math.sqrt(1-u*u)*math.sin(phi)\n self.direction = (u,v,w)", "def random_move(turtle, distance):\n angle = uniform(-90,90)\n d = uniform(0,distance)\n turtle.left(angle)\n turtle.forward(d)", "def __random_movement(self):\n\t\tself.__steps += 1 \t\t# Increment after every frame\n\t\t# When __steps greater than threshold reverse the direction\n\t\t# and set threshold to a new random value\n\t\tif self.__steps >= self.__threshold_steps:\t\n\t\t\tif self.direction == 'RIGHT':\n\t\t\t\tself.move_left()\n\t\t\t\tself.direction = 'LEFT'\n\t\t\telse:\n\t\t\t\tself.move_right()\n\t\t\t\tself.direction = 'RIGHT'\n\t\t\tself.__threshold_steps = random.randint(25,50)\n\t\t\tself.__steps = 0\n\t\t# Confines the Donkeys movement to within the boundary \n\t\tself.__check_boundary()", "def MoveRandom(self):\n r = random.randint(0,3)\n if r == 0: self.x += 1\n elif r == 1: self.y += 1\n elif r == 2: self.x -= 1\n elif r == 3: self.y -= 1", "def random_direction():\n\n if randrange(2):\n return Direction.RIGHT\n else:\n return Direction.DOWN", "def chooseNewDirection(self, speedRange=None):\n if speedRange is None: speedRange = self.MONSTER_SPEED\n self.dx = random.randint(-speedRange, speedRange)\n self.dy = random.randint(-speedRange, speedRange)", "def random_walk(turtle, distance, steps):\n turtle.color(randcolor(), randcolor())\n for step in range(0,steps):\n random_move(turtle, distance)\n gohome(turtle)", "def move(self):\n if random.random() < 0.5:\n self.y = (self.y + 1) % 100\n else:\n self.y = (self.y - 1) % 100\n if random.random() < 0.5:\n self.x = (self.x + 1) % 100\n else:\n self.x = (self.x - 1) % 100", "def update(self):\n if self.x<0:\n self.x = 0\n\n if self.y <0:\n self.y = 0\n\n if bool(randint(0, 1))==True:\n if self.walker == True:\n self.x += randint(-2, 2)\n self.y += randint(-2, 2)", "def randomWalk(t, turns, distance=20):\n for x in range(turns):\n if x % 2 == 0:\n t.left(random.randint(-180, 180))\n else:\n t.right(random.randint(-180, 180))\n t.forward(random.randint(1,distance))\n sleep(10)", "def move(self, direction):\n newx = self.x\n newy = self.y\n newy += random.randint(-1, 1)\n newx += random.randint(-1, 1)\n if self.tmap.contents[newy][newx] != '#':\n self.x = newx\n self.y = newy", "def random_step(self):\n pos = [i for i in range(9) if self.grid[i] == 0]\n move = random.choice(pos)\n return self.step(move)", "def random_step(self):\n pos = [i for i in range(9) if self.grid[i] == 0]\n move = random.choice(pos)\n return self.step(move)", "def wander(self, speed: float = 1.):\n pos = np.array(self.pos)\n\n random_heading = random.random() * 2 * np.pi\n rand_vector = speed * np.array([np.cos(random_heading), np.sin(random_heading)])\n target_location = pos + rand_vector\n target_location = np.clip(target_location, [0, 0], [99.9, 99.9])\n self.model.space.move_agent(self, target_location)\n return", "def ran_direction(self,room):\r\n ran_dirt = self.random()\r\n if 1 == ran_dirt: \r\n room.front_attch(True)\r\n elif 2 == ran_dirt: \r\n room.back_attch(True)\r\n elif 3 == ran_dirt: \r\n room.left_attch(True)\r\n elif 4 == ran_dirt: \r\n room.right_attch(True)\r\n return room", "def advance(self):\n #x and y coordinates move and advance by adding the randomly generated velocity \n self.center.x += self.velocity.dx\n self.center.y += self.velocity.dy\n return", "def move_aim(self):\n self.color = random.choice(COLORS)\n self.x += 3 * 
self.speed_x / FPS\n self.y += 3 * self.speed_y / FPS\n self.r -= 1\n self.draw_aim()\n if self.r <= 10:\n self.color = random.choice(COLORS)\n self.x = randint(100, 1000)\n self.y = randint(100, 800)\n self.r = randint(50, 100)\n self.speed_x = randint(-200, 200)\n self.speed_y = randint(-200, 200)\n if self.x >= 1100:\n self.speed_x = randint(-100, -10)\n if self.x <= 50:\n self.speed_x = randint(10, 100)\n if self.y >= 800:\n self.speed_y = randint(-100, -10)\n if self.y <= 50:\n self.speed_y = randint(10, 100)", "def move(self):\n if self._z >= 75:\n a = random.random()\n print(str(a))\n if a < 0.2:\n self._z += 1\n if a > 0.2 and a < 0.9:\n self._z -= 1\n if a > 0.9:\n self._z = self._z\n else: \n self._z -= 1\n \n b = random.random()\n print(str(b))\n if b < 0.1:\n self._y += 1\n if b > 0.1 and b < 0.2:\n self._y -= 1\n if b > 0.2 and b < 0.25:\n self._x -= 1\n if b > 0.25:\n self._x += 1", "def initialize_direction(self):\n\n self.mu = 2. * np.random.rand(1)[0] - 1.", "def randomize_trajectory(self):\n self.angle = randint(-360, 360)\n self.speed = randint(1, 5)/2.5", "def move(self):\n possible_steps = self.model.grid.get_neighborhood(\n self.pos,\n moore=False, # implements Von Neumann neighborhood\n include_center=False)\n new_position = self.random.choice(possible_steps)\n self.heading = [new_position[0] - self.pos[0],\n new_position[1] - self.pos[1]]\n self.model.grid.move_agent(self, new_position)", "def nextmove(x, y):\n direction = rn.randrange(0, 4)\n\n if direction == 0: # move up\n y += 1\n elif direction == 1: # move down\n y -= 1\n elif direction == 2: # move right\n x += 1\n elif direction == 3: # move left\n x -= 1\n else:\n print(\"[ERROR] Direction isn't 0-3\")\n\n return x, y", "def move(self):\n self.x += self.speed_x / FPS\n self.y += self.speed_y / FPS\n self.draw_ball()\n if self.x >= 1100:\n self.speed_x = randint(-100, -10)\n if self.x <= 50:\n self.speed_x = randint(10, 100)\n if self.y >= 800:\n self.speed_y = randint(-100, -10)\n if self.y <= 50:\n self.speed_y = randint(10, 100)", "def _rand_direction(dim, rand):\n direction = rand.normal(size=dim)\n return direction / la.norm(direction)", "def start(self, robot):\n rospy.loginfo(\"Moving randomly\" + \" - \" + str(robot.robot_id))", "def begin_auto_moving(self, direction):\n self.direction = direction\n self.image_list = self.animation_dict[direction]\n self.state = 'automoving'\n self.x_vel = self.vector_dict[direction][0]\n self.y_vel = self.vector_dict[direction][1]\n self.move_timer = self.current_time", "def _move_randomly(self):\n a, b = randint(0, len(self.state) - 1), randint(0, len(self.state) - 1)\n wiz1, wiz2 = self.state[a], self.state[b]\n self._swap_wizards(wiz1, wiz2)", "def adjust_starting_position(self, direction):\n\n direction = 1 if direction in [\"left\", 1] else -1\n\n self.angle = direction * 25\n self.distance = 12\n self.drive_thread.reset()\n\n while self.drive_thread.driven_distance < self.distance:\n time.sleep(0.1)\n\n self.angle = 0\n self.distance = 12\n self.drive_thread.reset()\n\n while self.drive_thread.driven_distance < self.distance:\n time.sleep(0.1)\n\n self.angle = direction * -25\n self.distance = 12\n self.drive_thread.reset()\n\n while self.drive_thread.driven_distance < self.distance:\n time.sleep(0.1)", "def generate_direction(self):\n random_enum = random.randint(1, 4)\n random_direction = flow_processing_input.Direction(random_enum)\n assert isinstance(random_direction, flow_processing_input.Direction)\n return random_direction", "def 
generate_direction(self):\n random_enum = random.randint(1, 4)\n random_direction = flow_processing_input.Direction(random_enum)\n assert isinstance(random_direction, flow_processing_input.Direction)\n return random_direction", "def sample_direction():\r\n gamma = random.random()\r\n mu = 2*gamma-1\r\n gamma = random.random()\r\n phi = 2*np.pi*gamma\r\n direction = [np.sqrt(1-mu**2)*np.cos(phi), np.sqrt(1-mu**2)*np.sin(phi), mu]\r\n return direction", "def move_to_random_pos(self):\n newpos = [(np.random.rand() - 0.5) * 0.1,\n (np.random.rand() - 0.5) * 0.1,\n np.random.rand() * 0.9 + 0.2]\n self.move_to(newpos)", "def get_direction(self):\n is_direction_correct = False\n while not is_direction_correct:\n direction = random.randint(0, 2)\n if direction == 0:\n self.turtle.left(90)\n elif direction == 1:\n self.turtle.right(90)\n else:\n self.turtle.right(0)\n is_direction_correct = self.check_boundary()", "def example_move(self):\n self.right() # start rotating right\n time.sleep(1) # turn for a second\n self.stop() # stop\n self.servo(1000) # look right\n time.sleep(.25) # give your head time to move\n self.servo(2000) # look left", "def example_move(self):\n self.right() # start rotating right\n time.sleep(1) # turn for a second\n self.stop() # stop\n self.servo(1000) # look right\n time.sleep(.25) # give your head time to move\n self.servo(2000) # look left", "def _random_start_position(self):\r\n self.position = np.array(random.choice(self.start_positions),\r\n dtype=np.int16)", "def set_velocity(self):\n self.__dx = random.randint(1, MAX_X_SPEED)\n self.__dy = INITIAL_Y_SPEED\n if random.random() > 0.5:\n self.__dx = -self.__dx\n if random.random() > 0.5:\n self.__dy = -self.__dy", "def random_pose(self):\n position = self._start\n while self[position].distance < np.sum(self._rooms.shape) * 2:\n position = np.array(\n [random.randrange(limit) for limit in self._rooms.shape]\n )\n direction = random.choice(self.exits(position))\n return (position, direction)", "def direction_correction(self):\n self.directions.monster = random.uniform(self.directions.monster * self.get_monster_sensitivity(),\n self.directions.monster * (1 + (1 - self.get_monster_sensitivity())))\n self.directions.food = random.uniform(self.directions.food * self.get_food_sensitivity(),\n self.directions.food * (1 + (1 - self.get_food_sensitivity())))\n self.directions.water = random.uniform(self.directions.water * self.get_water_sensitivity(),\n self.directions.water * (1 + (1 - self.get_water_sensitivity())))", "def random_walk(n):\n x,y = 0,0\n for i in range(n):\n (dx,dy) = random.choice([(0,1),(1,0),(0,-1),(-1,0)])\n x += dx\n y+=dy\n return(x,y)", "def nod():\n while True:\n MOVEMENTS.set_raw_angle(7, 52)\n sleep(2)\n MOVEMENTS.set_raw_angle(7, 0)\n sleep(2)", "def move_random(self, board: Board) -> None:\n rnd_move_idx = randint(0,4)\n # moves: stay, up, left, right, down\n moves = [[0,0], [0,-1], [-1,0], [1,0], [0,1]]\n\n if board.can_position_at(self.x + moves[rnd_move_idx][0], self.y + moves[rnd_move_idx][1]):\n board.set_element_at_position(0, self.x, self.y)\n self.x += moves[rnd_move_idx][0]\n self.y += moves[rnd_move_idx][1]\n board.set_element_at_position(3, self.x, self.y)\n print(\"Bomberman moved to [\", self.x, \",\", self.y, \"]\")", "def getRandomDirection(self):\n\t\tx = self.x\n\t\ty = self.y\n\n\t\t# Get a random number between 0 and 3 inclusive: (0, 1, 2, 3)\n\t\tdirection = random.randint(0, 3)\n\t\tif direction == 0:\n\t\t\tx -= 1\t\t\t\t\t\t\t# Move Left\n\t\telif direction == 1:\n\t\t\tx += 
1\t\t\t\t\t\t\t# Move Right\n\t\telif direction == 2:\n\t\t\ty -= 1\t\t\t\t\t\t\t# Move Up\n\t\telse:\n\t\t\ty += 1\t\t\t\t\t\t\t# Move Down\n\t\t\n\t\t# Return the co-ordinates as a tuple.\n\t\treturn (x, y)", "def __init__(self, room, speed):\n self.room = room\n self.speed = speed\n self.position = room.getRandomPosition()\n self.direction = random.randrange(359)", "def move(self):\n \n # checks for bots nearby\n next_move = self.follow()\n \n # finds a random move if no bot\n if next_move is self.position:\n self.position = self.wander()\n else:\n self.position = next_move", "def move_randomly(self, with_fight=False):\n delta = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (0, -1), (1, -1), (1, 0), (1, 1)]\n rd.shuffle(delta)\n x, y = self.owner.pos\n while len(delta) > 0:\n dx, dy = delta.pop()\n if self.move_towards_position((x + dx, y + dy)):\n return", "def random_walk_2(n):\n x,y=0,0\n for i in range(n):\n dx,dy = random.choice([(0,1), (0,-1),(1,0) ,(-1,0)])\n x+= dx\n y+= dy\n return (x,y)", "def move_coarse(self, direction, count=1):\n if self._direction != direction and self.simulate_backlash:\n self._direction = direction\n backlash_offset = randint(-maximum_backlash, maximum_backlash)\n self._move(direction, 1, 8 + backlash_offset)\n self._move(direction, count - 1, 8)\n self.backlash_count += 1\n else:\n self._direction = direction\n self._move(direction, count, 8)", "def begin_moving(self, direction):\n self.direction = direction\n self.image_list = self.animation_dict[direction]\n self.timer = self.current_time\n self.move_timer = self.current_time\n self.state = 'moving'\n\n if self.rect.x % 32 == 0:\n self.y_vel = self.vector_dict[self.direction][1]\n if self.rect.y % 32 == 0:\n self.x_vel = self.vector_dict[self.direction][0]", "def random(self):\n adj = self.adjacent()\n self.switch(random.choice([pos for pos in adj if self.in_grid(pos) and pos != self.prev]))", "def __init__(self, room, speed):\n self.room = room\n self.speed = speed\n self.direction = random.randrange(0, 360)\n self.position = room.getRandomPosition()", "def move(self, direction):\n pass", "def make_random_move(self):\n #completely random move\n all_moves = set(itertools.product(range(self.height), range(self.width)))\n moves_left = list(all_moves - self.mines - self.moves_made)\n if not moves_left:\n return None\n return random.choice(moves_left)", "def next_move():\n move = int(4 * random.random())\n if move == 0:\n return [1, 0]\n elif move == 1:\n return [-1, 0] \n elif move == 2:\n return [0, 1] \n else:\n return [0, -1]", "def make_random_move(state: State) -> State:\n return random.choice(state.get_possible_states())", "def walk(self, dir):\n x, y, theta = dir\n self.motionProxy.moveToward(x, y, theta, [[\"Frequency\", 1]])\n self.isWalking = True", "def auto_play_random(self, player=None):\r\n if player is None:\r\n player = self.get_player()\r\n legal_list = self.get_legal_list()\r\n next_move = legal_list.rand_obj()\r\n self.new_edge(next_move)", "def turn(dir, speed, runtime):\n\trightMotor.run_timed(duty_cycle_sp=-dir*speed, time_sp=runtime)\n\tleftMotor.run_timed(duty_cycle_sp=dir*speed, time_sp=runtime)", "def move_me_on_spawn(self):\r\n\t\tif self.points_to_go:\r\n\t\t\tself.start_pos = self.points_to_go[0]\r\n\t\t\tfor point in self.points_to_go[1:]:\r\n\t\t\t\tfor i in range(len(self.points_to_go[1:])):\r\n\t\t\t\t\tself.goal_pos = self.points_to_go[i]\r\n\t\t\t\t\t\r\n\t\t\t\t\tself.move_me()\r\n\t\t\t\t\t#self.start_pos = \r\n\t\t\t\t\t#print(self.goal_pos)\r\n\t\t\t\t\t#if 
self.move_me():\r\n\t\t\t\t\t#\ti += 1\r\n\t\t\t\t\t#\tprint('switch')\r", "def turn(self, dir):\n if dir.upper() == 'R':\n if self.direction == 3:\n self.direction = 0\n else:\n self.direction += 1\n if dir.upper() == 'L':\n if self.direction == 0:\n self.direction = 3\n else:\n self.direction -= 1", "def run(self):\n while self.direction != \"\":\n if self.direction == \"decrease\":\n if self.position > 200:\n self.position -= 15\n elif self.direction == \"increase\":\n if self.position < 800:\n self.position += 15\n if self.direction != \"neutral\":\n self.move_joint(self.position, 900)\n time.sleep(0.1)", "def make_random_move(self):\n s=set()\n for i in range(self.height):\n for j in range(self.width):\n s.add((i,j))\n\n s=s-self.mines-self.moves_made\n if s==set(): return None\n return random.choice(list(s))\n #raise NotImplementedError", "def during(self, robot):\n self.counter += 1\n randint = random.randint(1, 5)\n\n if 1 <= randint <= 4 and not robot.is_blocked():\n robot.forward()\n else:\n robot.start_rotate()", "def move_player(direction):\n global ZERO_BASE_PLYR_POS\n if direction == \"north\":\n ZERO_BASE_PLYR_POS -= 10\n elif direction == \"south\":\n ZERO_BASE_PLYR_POS += 10\n elif direction == \"west\":\n ZERO_BASE_PLYR_POS -= 1\n elif direction == \"east\":\n ZERO_BASE_PLYR_POS += 1\n \n sleep(0.5) # all moves have a 0.5 second delay\n \n show_ground_feature()", "def __init__(self):\n self.positionx = 400\n self.positiony = 600\n # direction goes from [0,360)\n self.direction = (45)", "def step(self):\r\n\r\n self.velocity = 1\r\n new_pos = self.pos\r\n self.model.space.move_agent(self, new_pos)", "def _update_position(self):\r\n for tstep in range(0, self.MAX_VELOCITY + 1):\r\n t = tstep / self.MAX_VELOCITY\r\n pos = self.position + np.round(self.velocity * t).astype(np.int16)\r\n if self._is_wall(pos):\r\n self._random_start_position()\r\n self.velocity = np.array([0, 0], dtype=np.int16)\r\n return\r\n if self._is_finish(pos):\r\n self.position = pos\r\n self.velocity = np.array([0, 0], dtype=np.int16)\r\n return\r\n self.position = pos", "def auto_resting(self):\n self.image_list = self.animation_dict[self.direction]\n self.image = self.image_list[self.index]\n\n if self.rect.y % 32 != 0:\n self.correct_position(self.rect.y)\n if self.rect.x % 32 != 0:\n self.correct_position(self.rect.x)\n\n if (self.current_time - self.move_timer) > 2000:\n direction_list = ['up', 'down', 'left', 'right']\n random.shuffle(direction_list)\n direction = direction_list[0]\n self.begin_auto_moving(direction)\n self.move_timer = self.current_time", "def _animate(self):\n steps = (1, 7, 14)\n if self.rect.x < self.start_x - 100:\n self.change_dir = False\n elif self.rect.x > self.start_x + 100:\n self.change_dir = True\n self.direction = -1 if self.change_dir else 1\n self.rect.x += self.direction * choice(steps)", "def __move(particle, motion):\n particle[0] += random.gauss(0.0, motion)\n particle[1] += random.gauss(0.0, motion)\n return particle", "def move(self):\n\n # get the location we WOULD go to\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n while (abs (newX) > self.BOX_RANGE) or (abs(newY) > self.BOX_RANGE):\n # print(\"choosing new direction... 
\",end=\"\")\n self.chooseNewDirection()\n # print(self.dx, self.dy)\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n\n # now move our monster\n super().move()", "def move(self, model):\n grid = model.grid\n possible_steps = grid.get_neighborhood(\n self.pos, moore=True, include_center=True)\n choice = random.choice(possible_steps)\n grid.move_agent(self, choice)", "def get_random_direction(self) -> int:\n return int(self.generate_random_no() * 10 < 5)", "def run(self):\n move_cmd = Twist()\n move_cmd.linear.x = 0\n move_cmd.angular.z = 0\n\n while not rospy.is_shutdown():\n # bump logic as previous psets\n if self.bump:\n self.bump = False\n # move backwards\n move_cmd.linear.x = LIN_SPEED * -1\n for i in range(5):\n self.cmd_vel.publish(move_cmd)\n self.rate.sleep()\n rospy.sleep(1)\n\n # turn randomly in a random direction\n move_cmd.linear.x = 0\n move_cmd.angular.z = ROT_SPEED * ((-1)**random.randint(1,2))\n\n if self.bump == 0:\n move_cmd.angular.z = ROT_SPEED * (-1)\n elif self.bump == 2:\n move_cmd.angular.z = ROT_SPEED\n\n for i in range(random.randint(5,15)):\n self.cmd_vel.publish(move_cmd)\n self.rate.sleep()\n rospy.sleep(1)\n\n move_cmd.angular.z = 0\n # if somethin in the screen is really close\n elif self.min_val < MIN_THRESHOLD:\n # make sure it's not the sock/leg warmer, and is actually an obstacle\n if self.obstacle_x <= self.x or self.obstacle_x >= self.x + self.w or abs(self.min_val - self.dist) > 0.1:\n move_cmd.linear.x = 0\n # turn away\n if self.obstacle_x > 320:\n move_cmd.angular.z = ROT_SPEED / 2\n else:\n move_cmd.angular.z = -ROT_SPEED / 2\n # self.min_val = 100\n for i in range(10):\n self.cmd_vel.publish(move_cmd)\n self.rate.sleep()\n self.last_move = rospy.Time.now()\n else:\n rospy.loginfo(\"Perimeter \" + str(self.perimeter_size))\n rospy.loginfo(\"Distance is \" + str(self.dist))\n\n # normalize angle error to rot speed\n ang_error_norm = -float(self.ang_error) / 100\n\n # set min and max rot speed\n if ang_error_norm < -ROT_SPEED:\n ang_error_norm = -ROT_SPEED\n elif ang_error_norm > ROT_SPEED:\n ang_error_norm = ROT_SPEED\n\n move_cmd.angular.z = ang_error_norm\n\n if RACE == False:\n # normalize dist error to lin speed\n self.dist_error = self.dist - 0.5\n dist_error_norm = float(self.dist_error) / 2\n\n if dist_error_norm < 0:\n # if NaN (self.dist gets set to -1)\n if dist_error_norm > -0.7:\n self.lost = 0\n # if too close\n else:\n self.lost += 1\n # if it's been more than 2 seconds\n if rospy.Time.now() > self.last_move + rospy.Duration(2):\n dist_error_norm = 0\n # if been lost for a while rotate and beep\n if self.lost > 20:\n move_cmd.angular.z = ROT_SPEED / 4\n self.beep.publish(4)\n else:\n # continue as previous\n dist_error_norm = self.last_speed\n else:\n # set max lin speed\n if dist_error_norm > LIN_SPEED:\n dist_error_norm = LIN_SPEED\n\n # reset lost stats\n self.lost = 0\n self.last_speed = dist_error_norm\n self.last_move = rospy.Time.now()\n\n move_cmd.linear.x = dist_error_norm\n else:\n move_cmd.linear.x = LIN_SPEED\n\n self.cmd_vel.publish(move_cmd)", "def update(self):\n if self.iteration > self.rate:\n self.iteration = 0\n heading = (random.random() * 180) - 90\n self.speed = 0.1\n if heading >= 0:\n self.heading = heading\n else:\n self.heading = 360 + heading\n self.iteration += 1\n self.setVector(self.speed, self.heading)", "def reset_movement(self):\n self.direction = [0, 0]", "def test_move_default_extra_steps(self):\n player = ss.ResilientPlayer()\n random.seed(2)\n player.move()\n random.seed(1)\n 
player.move()\n random.seed(2)\n player.move()\n assert player.position == 32", "def move_distance(self, distance, speed=1.0):\n distance = random.normalvariate(distance, self.standard_deviation)\n\n start_point = self.get_position()\n traveled_distance = 0\n while traveled_distance < distance:\n self.forward(speed)\n current_point = self.get_position()\n traveled_distance = math.sqrt(\n math.pow((start_point[0] - current_point[0]), 2) + math.pow((start_point[1] - current_point[1]), 2))\n self.stop()", "def move(self, direction):\n # replace with your code\n pass", "def move(self, direction):\n # replace with your code\n pass", "def move(self):\r\n\r\n # Randomizes movement after 40 steps and flips sprite \\\r\n # (if x-value of speed variable changes from positive to negative)\r\n if step == 40 and 0 < hunger < 205 and thirst < 175 and self.speed[0] not in range(-1000, 0):\r\n self.speed[0] = random.randint(-5, -1)\r\n self.speed[1] = random.randint(-7, 7)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n\r\n # Randomizes movement after 40 steps, but doesn't flip sprite because \\\r\n # x-value of speed variable doesn't change from positive to negative\r\n elif step == 40 and 0 < hunger < 205 and thirst < 175 and self.speed[0] in range(-1000, 0):\r\n self.speed[0] = random.randint(-5, -1)\r\n self.speed[1] = random.randint(-7, 7)\r\n\r\n # Randomizes movement after 80 steps and flips sprite \\\r\n # (if x-value of speed variable changes from negative to positive)\r\n if step == 80 and 0 < hunger < 205 and thirst < 175 and self.speed[0] not in range(0, 1000):\r\n self.speed[0] = random.randint(1, 5)\r\n self.speed[1] = random.randint(-7, 7)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n\r\n # Randomizes movement after 80 steps, but doesn't flip sprite \\\r\n # because x-value of speed variable doesn't change from positive to negative\r\n elif step == 80 and 0 < hunger < 205 and thirst < 175 and self.speed[0] in range(0, 1000):\r\n self.speed[0] = random.randint(1, 5)\r\n self.speed[1] = random.randint(-7, 7)\r\n\r\n # Flips the dino sprite when it hits the left or right side of the enclosure \\\r\n # and reverses dino's speed\r\n if self.rect.right > 818 or self.rect.left < 182:\r\n # Keeps sprite from getting stuck on wall in an endless cycle of flipping\r\n if step != 40 and step != 80 and 0 < hunger < 205 and thirst < 175:\r\n self.speed[0] = - self.speed[0]\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n\r\n # Reverses the dino's speed if it hits the top or bottom side of the enclosure\r\n if self.rect.top < 55 or self.rect.bottom > 542:\r\n # Keeps sprite from getting stuck on wall in an endless cycle of flipping\r\n if step != 40 and step != 80 and 0 < hunger < 205 and thirst < 175:\r\n self.speed[1] = - self.speed[1]\r\n\r\n # Causes dinosaur to go to the tree when hunger is high enough\r\n if hunger >= 205:\r\n if step != 40 and step != 80 and 0 < thirst < 175:\r\n if self.rect.left > 300 and self.speed[0] not in range(-1000, 0):\r\n # Speed must be rounded so that speed[0] and speed[1] is in the range functions above \\\r\n # (range function doesn't take decimal point numbers)\r\n self.speed[0] = round((300 - self.rect.left)/30)\r\n self.speed[1] = round((340 - self.rect.top)/30)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n elif self.rect.left > 300 and self.speed[0] in range(-1000, 0):\r\n self.speed[0] = round((300 - self.rect.left)/30)\r\n self.speed[1] = round((340 - self.rect.top)/30)\r\n if self.rect.left < 300 and 
self.speed[0] not in range(1, 1000):\r\n self.speed[0] = round((300 - self.rect.left)/30)\r\n self.speed[1] = round((340 - self.rect.top)/30)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n elif self.rect.left < 300 and self.speed[0] in range(1, 1000):\r\n self.speed[0] = round((300 - self.rect.left)/30)\r\n self.speed[1] = round((340 - self.rect.top)/30)\r\n\r\n # Causes dinosaur to go to the pond when thirst is high enough\r\n if thirst == 175:\r\n if step != 40 and step != 80:\r\n if self.rect.left > 540 and self.speed[0] not in range(-1000, 0):\r\n self.speed[0] = round((540 - self.rect.left)/30)\r\n self.speed[1] = round((120 - self.rect.top)/30)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n elif self.rect.left > 540 and self.speed[0] in range(-1000, 0):\r\n self.speed[0] = round((540 - self.rect.left)/30)\r\n self.speed[1] = round((120 - self.rect.top)/30)\r\n if self.rect.left < 540 and self.speed[0] not in range(1, 1000):\r\n self.speed[0] = round((540 - self.rect.left)/30)\r\n self.speed[1] = round((120 - self.rect.top)/30)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n elif self.rect.left < 540 and self.speed[0] in range(1, 1000):\r\n self.speed[0] = round((540 - self.rect.left)/30)\r\n self.speed[1] = round((120 - self.rect.top)/30)\r\n\r\n # Sets rectangle surrounding dino sprite to new position based on its speed\r\n newpos = self.rect.move(self.speed)\r\n self.rect = newpos", "def __get_new_direction(self):\n return fabs(self.random_generator.normal(self.__direction_mean,\n self.__direction_deviation))", "def random_walk_2(n):\r\n x, y = 0, 0\r\n for i in range(n):\r\n (dx, dy) = random.choice([(0, 1), (0, -1), (1, 0), (-1, 0)])\r\n \r\n x += dx\r\n y += dy\r\n return (x, y)", "def bounce(self):\n self.__dx = random.randint(1, MAX_X_SPEED)\n self.__dy = -abs(self.__dy)\n if random.random() > 0.5:\n self.__dx = -self.__dx", "def set_random_pos(self, which):\n available = [[r, c] for r, row in enumerate(self.maze)\n for c, value in enumerate(row) if value == ' ']\n choice = random.choice(available)\n if which == 'starting':\n self.current_pos = choice\n elif which == 'finishing':\n self.finish_pos = choice", "def move_step(self, direction):\n x = self.objects[0].x\n y = self.objects[0].y\n if direction == 0 and y >= 1:\n self.objects[0].y -= 1\n elif direction == 1 and y <= self.size_y - 2:\n self.objects[0].y += 1\n elif direction == 2 and x >= 1:\n self.objects[0].x -= 1\n elif direction == 3 and x <= self.size_x - 2:\n self.objects[0].x += 1", "def move_dart(self):\n global level\n if level == 0:\n self.rect.centerx+=self.delta\n if self.rect.centerx >= 1000: \n self.delta = -1\n elif self.rect.centerx < 500:\n self.delta = 1\n elif level == 1:\n self.rect.centery+=self.delta\n if self.rect.centery <= 150: \n self.delta = 2\n elif self.rect.centery > 650:\n self.delta = -2\n elif level == 2:\n self.rect.centerx+=self.delta #To make changes in both x and y direction\n self.rect.centery+=self.delta\n if self.rect.centerx < 100 or self.rect.centery <= 100: \n self.delta = random.randint(1,10) #adds random speeds to the motion\n elif self.rect.centerx >= 900 or self.rect.centery > 700:\n self.delta = -random.randint(1,10)", "def random_walk(n):\n x,y = 0,0\n for i in range(n):\n step = random.choice(['N','S','E','W'])\n if step == 'N':\n y+=1\n elif step == 'S':\n y-=1\n elif step == 'E':\n x+=1\n else:\n x-=1\n return (x,y)", "def autoMove(self) :\n\n\t\tdx = Places.getLoc(self.targetPlace)[0] - self.avatarNP.getX()\n\t\tdy = 
Places.getLoc(self.targetPlace)[1] - self.avatarNP.getY()\n\t\tdist = math.sqrt(dx*dx + dy*dy)\n\t\th0 = self.avatarNP.getH()\n\t\tif dist < 4 :\n\t\t\t# pick new target and determine deltaH\n\t\t\tnbors = Places.getNeighbors(self.targetPlace)\n\t\t\tx = random.randint(0,len(nbors)-1)\n\t\t\tif nbors[x] == self.oldPlace :\n\t\t\t\tx = (1 if x == 0 else x-1)\n\t\t\tt = nbors[x]\n\t\t\th = self.heading(\n\t\t\t\tself.avatarNP.getX(), self.avatarNP.getY(),\n\t\t\t\tPlaces.getLoc(t)[0], Places.getLoc(t)[1])\n\t\t\tself.deltaH = h - h0\n\t\t\tif self.deltaH > 180 : self.deltaH -= 360\n\t\t\telif self.deltaH < -180 : self.deltaH += 360\n\t\t\tself.deltaH /= 2\n\t\t\tself.oldPlace = self.targetPlace\n\t\t\tself.targetPlace = t\n\t\t\tself.turning = True\n\n\t\t# adjust heading and position\n\t\tt = self.targetPlace\n\t\th = self.heading(self.avatarNP.getX(), self.avatarNP.getY(),\n\t\t\t\t Places.getLoc(t)[0], Places.getLoc(t)[1])\n\t\tdh1 = h - h0\n\t\tif dh1 > 180 : dh1 -= 360\n\t\telif dh1 < -180 : dh1 += 360\n\t\tif self.turning :\n\t\t\tdh2 = self.deltaH * globalClock.getDt()\n\t\t\tif math.fabs(dh1) <= math.fabs(dh2) : \n\t\t\t\tself.turning = False\n\t\t\telse :\n\t\t\t\th = h0 + dh2\n\t\tself.avatarNP.setH(h)\n\t\tself.avatarNP.setFluidY(self.avatarNP,-2 * globalClock.getDt())\n\t\t\n\t\treturn\n\n\t\t\"\"\"\n\t\tif self.rotateDir == -1:\n\t\t\tself.rotateDir = random.randint(1,25) #chances to rotate\n\t\tif self.rotateDuration == -1:\n\t\t\tself.rotateDuration = random.randint(200,400)\n\n\t\t# guide the moving direction of the bot\n\t\tif self.rotateDir <= 3 : # turn left\n\t\t\tself.avatarNP.setH(self.avatarNP.getH() + \\\n\t\t\t\t\t 40 * globalClock.getDt())\n\t\t\tself.rotateDuration -= 1\n\t\t\tif self.rotateDuration <= 0:\n\t\t\t\tself.rotateDuration = -1\n\t\t\t\tself.rotateDir = -1\n\t\telif self.rotateDir <= 6 : # turn right\n\t\t\tself.avatarNP.setH(self.avatarNP.getH() - \\\n\t\t\t\t\t 50 * globalClock.getDt())\n\t\t\tself.rotateDuration -= 1\n\t\t\tif self.rotateDuration <= 0:\n\t\t\t\tself.rotateDuration = -1\n\t\t\t\tself.rotateDir = -1\n\t\telif self.rotateDir == 7 : # turn big left\n\t\t\tself.avatarNP.setH(self.avatarNP.getH() + \\\n\t\t\t\t\t 102 * globalClock.getDt())\n\t\t\tself.rotateDuration -= 1\n\t\t\tif self.rotateDuration <= 0:\n\t\t\t\tself.rotateDuration = -1\n\t\t\t\tself.rotateDir = -1\n\t\telif self.rotateDir == 8 : # turn big right\n\t\t\tself.avatarNP.setH(self.avatarNP.getH() - \\\n\t\t\t\t\t 102 * globalClock.getDt())\n\t\t\tself.rotateDuration -= 1\n\t\t\tif self.rotateDuration <= 0:\n\t\t\t\tself.rotateDuration = -1\n\t\t\t\tself.rotateDir = -1\n\t\telse :\n\t\t\tself.rotateDuration -= 1\n\t\t\tif self.rotateDuration <= 0:\n\t\t\t\tself.rotateDuration = -1\n\t\t\t\tself.rotateDir = -1\n\t\t\tself.avatarNP.setFluidPos(self.avatarNP, 0,\n\t\t\t\t\t-1 * globalClock.getDt(),\n\t\t\t\t\tself.avatarNP.getZ() )\n\t\t# moving forward\n\t\t#self.avatarNP.setFluidPos(self.avatarNP, 0,\n\t#\t\t\t\t-1 * globalClock.getDt(),\n\t#\t\t\t\tself.avatarNP.getZ() )\n\t\treturn\n\t\t\"\"\"", "def click_aim(self, pos):\n x, y = pos\n if (self.x - x) ** 2 + (self.y - y) ** 2 <= self.r ** 2:\n self.color = random.choice(COLORS)\n self.x = randint(100, 1000)\n self.y = randint(100, 800)\n self.r = randint(50, 100)\n self.speed_x = randint(-200, 200)\n self.speed_y = randint(-200, 200)\n return True\n else:\n return False", "def move(self):\n assert self.is_alive, \"Sprite is dead, and should not be able to move\"\n if self.health > 3:\n self.y += random.randint(-1, 1) # 
change by -1, 0, 1\n self.x += random.randint(-1, 1) # change by -1, 0, 1\n print(self.name, \"moves to position\", str(self.x), \",\", str(self.y))", "def move(self, direction):\n\n if direction == \"north\":\n self.go_and_update(-1, 0)\n\n elif direction == \"south\":\n self.go_and_update(1, 0)\n\n elif direction == \"east\":\n self.go_and_update(0, 1)\n\n elif direction == \"west\":\n self.go_and_update(0, -1)", "def randwalk(n):\n\tx=0;\n\ty=0;\n\n\tfor i in range(n):\n\t\tstep = random.choice(['N','S','E','W'])\n\t\tif step== 'N':\n\t\t\ty=y+1\n\t\telif step=='S':\n\t\t\ty=y-1\n\t\telif step=='E':\n\t\t\tx=x+1\n\t\telse:\n\t\t\tx=x-1\n\treturn (x,y)", "def moveStep(self):\n\t\tif self.pos[0] <= self.boundsX[0] or \\\n\t\t(self.pos[0]+ 2*(self.radius)) >= self.boundsX[1]:\n\t\t\tself.dir[0] *= -1\n\t\t\t\n\t\tself.pos[0] += self.dir[0]*self.speed\n\t\tself.pos[1] += self.dir[1]*self.speed", "def move(self):\n\n choices = []\n if self.game.green_apples > 0:\n choices.append(self.game.move_green)\n if self.game.red_apples > 0:\n choices.append(self.game.move_red)\n if self.game.blue_plums > 0:\n choices.append(self.game.move_blue)\n if self.game.yellow_pears > 0:\n choices.append(self.game.move_yellow)\n\n random_index = random.randint(0, len(choices) - 1)\n f = choices[random_index]\n f(True)", "def shuffle(self):\n self.turn_by_deg(20)\n time.sleep(.25)\n self.fwd()\n time.sleep(1)\n self.stop()\n self.back()\n time.sleep(1)\n self.stop()\n self.turn_by_deg(-40)\n time.sleep(.25)\n self.fwd()\n time.sleep(1)\n self.back()\n time.sleep(1)\n self.stop()", "def updatePositionAndClean(self):\n self.direction = random.random()*360\n newPosition = self.position.getNewPosition(self.direction, self.speed)\n #test if newPosition is in the room\n if self.room.isPositionInRoom(newPosition):\n self.room.cleanTileAtPosition(newPosition)\n self.setRobotPosition(newPosition)\n else:\n #new position is NOT in the room generate a new direction, wait until next call to updatePosition\n self.direction = random.random()*360\n\n #raise NotImplementedError", "def sample(self, d):\n dist = rnd.uniform(0,self.length)\n w = rnd.normal(0,self.width)\n d.pos = np.dot(rotmat(self.angle), [dist, w]) + self.pos\n d.ownpos = self.pos", "def random_walk(n):\n\tx, y = 0, 0\n\tfor i in range(n):\n\t\tstep = random.choice(['N', 'S', 'E', 'W'])\n\t\tif step == 'N':\n\t\t\ty += 1\n\t\tif step == 'S':\n\t\t\ty -= 1\n\t\tif step == 'E':\n\t\t\tx += 1\n\t\tif step == 'W':\n\t\t\tx -= 1\n\treturn (x, y)", "def ball_generate_velocity(direction):\n x_comp = random.randrange(120,240) / 60.0\n y_comp = -(random.randrange(60,180) / 60.0)\n if direction != RIGHT:\n x_comp = -x_comp\n return [x_comp, y_comp]" ]
[ "0.76126647", "0.7555828", "0.7380981", "0.7263461", "0.71452343", "0.7137276", "0.69984514", "0.6989068", "0.6987881", "0.69832087", "0.6966477", "0.6943634", "0.6943634", "0.6928557", "0.6794044", "0.67522436", "0.6749555", "0.6610872", "0.6600256", "0.6551586", "0.6535305", "0.6532271", "0.6521677", "0.6516652", "0.6481078", "0.6451033", "0.64122623", "0.6405389", "0.6399362", "0.6399362", "0.6376593", "0.6375055", "0.63656735", "0.6362966", "0.6362966", "0.63510567", "0.63369006", "0.63177884", "0.6302813", "0.6301858", "0.6273751", "0.62649643", "0.6239392", "0.6238199", "0.62310326", "0.6176326", "0.6175139", "0.6167084", "0.61522573", "0.6142437", "0.61240774", "0.6114547", "0.6111778", "0.6105632", "0.61004716", "0.6096357", "0.6085176", "0.6084841", "0.60698205", "0.6055485", "0.605031", "0.6046813", "0.6039243", "0.6032929", "0.6023065", "0.6016019", "0.5997578", "0.59937304", "0.5990079", "0.5985824", "0.5982648", "0.59806657", "0.59779143", "0.5976364", "0.5970379", "0.5969488", "0.5966626", "0.59659195", "0.596439", "0.596439", "0.5964347", "0.59523845", "0.59345406", "0.5931269", "0.5930309", "0.592949", "0.5922484", "0.5922236", "0.5918633", "0.5917299", "0.5912015", "0.589322", "0.5875681", "0.58728087", "0.5866626", "0.5861285", "0.586079", "0.5849344", "0.58458704", "0.58411545" ]
0.6732395
17
Change movement speed of the actor
def change_velocity(self, delta): self.velocity += delta
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move(self):\n self.position += self.speed", "def set_speed(self, new_speed):\n self.__x_speed, self.__y_speed = new_speed", "def movementSpeedModifier(self):\n return 0", "def walk(self):\n self.speed = self.speed + (0.2 * self.legs)", "def move_turtle(self):\n self.forward(self.move_speed)", "def movespeed(self, speed):\n self._speed = speed", "def increase_speed(self):\n self.ship_speed_factor *= self.speed_up_scale\n self.bullet_speed_factor *= self.speed_up_scale\n self.alien_speed_factor *= self.speed_up_scale", "def set_speed(self, speed=0):\n speed = clamp(speed)\n self._state.speed = speed\n self.send_command(Command.SET_SPEED, [int(speed)])", "def increase_speed(self):\n self.target_speed *= self.speedup_scale\n self.bullet_speed_factor *= self.speedup_scale", "def move(self) -> None:\n\n new_x = self.getX() + self.speed[0]\n new_y = self.getY() + self.speed[1]\n self.setX(new_x)\n self.setY(new_y)", "def set_speed(self,speed):\n self.speed = speed", "def move_set_speed(self, speed):\n # self.motor_set_speed(MOTOR_LEFT, speed)\n # self.motor_set_speed(MOTOR_RIGHT, speed)\n self.move_speed = speed\n print(\"move_speed is now:\", self.move_speed)", "def set_motor_speed(self, speed=0.0):\r\n self.target_speed = speed", "def increase_speed(self, character):\n character.speed = min(character.max_steps/4, character.speed * 1.25)", "def move(self) -> None:\r\n self._x += self._speed", "def set_speed(self, speed):\n self.speed = speed", "def changespeed(self, x, y):\n self.change_x += x\n self.change_y += 200", "def change_speed(self, action):\r\n if action == \"faster\":\r\n self.speed += 1\r\n else:\r\n if self.speed > 1:\r\n self.speed -= 1", "def move(self, action): # Good\n if action == 0:\n dx, dy = 0, 1\n elif action == 1:\n dx, dy = 1, 0\n elif action == 2:\n dx, dy = 0, -1\n elif action == 3:\n dx, dy = -1, 0\n else:\n dx, dy = 0, 0\n\n # Check for max speed\n if ((self.vel_x + dx)**2 + (self.vel_y + dy)**2) \\\n <= self.max_speed_sq:\n self.x_vel += dx\n self.y_vel += dy\n\n self.prev_pos = self.center\n super(Player, self).move()", "def set_speed(self, level):\n speed = self.SPEED + (self.SPEED_INCREMENT * level)\n\n if self.lane % 2:\n # Move to the right\n self.velocity = (speed, 0)\n else:\n # Move to the left\n self.velocity = (-speed, 0)", "def set_speed(self, speed):\n self._kernel.set_speed(float(speed))", "def increase_car_speed(self):\r\n self.car_speed += 5", "def changespeed(self, x, y):\n self.change_x += x\n self.change_y += y", "def changespeed(self, x, y):\n self.change_x += x\n self.change_y += y", "def changespeed(self, x, y):\n self.change_x += x\n self.change_y += y", "def set_speed(self,speed):\n self.speed_p = speed", "def increase_speed(self):\n self.ship_speed_factor *= self.speedup_scale\n self.bullet_speed_factor *= self.speedup_scale\n self.alien_speed_factor *= self.speedup_scale\n self.alien_points = int(self.alien_points * self.score_scale)", "def _move(self, dx, dy):\n # horizontal velocity is dx, vertical velocity is dy\n self._player.set_velocity((dx, dy))", "def update(self):\n\n self.rect.y += self.speed", "def move(self, vel):\n self.y += vel", "def speed(self, value: float):\n self._speed = value", "def set_speed(self, SHIP_MOVEMENT):\n self._speed = SHIP_MOVEMENT", "def increase_speed(self):\n self.covid_horizontal_speed_factor *= self.speedup_scale\n self.bullet_speed_factor *= self.speedup_scale\n self.hero_speed_factor *= self.speedup_scale", "def set_speed (self, dx = None, dy = None) :\n if dx != None :\n self.speed[0] = dx\n if 
dy != None :\n self.speed[1] = dy", "def increment_speed(self):\n self.speed += 0.0004", "def increase_speed(self):\n self.ship_speed_factor *= self.speed_up_scale\n self.bullet_speed_factor *= self.speed_up_scale\n self.alien_speed_factor *= self.speed_up_scale\n\n self.alien_points = int(self.alien_points * self.score_scale)", "def move(self):\n self.x += math.sin(self.angle) * self.speed\n self.y -= math.cos(self.angle) * self.speed\n # Next, account for gravity\n (self.angle, self.speed) = addVectors((self.angle, self.speed), gravity)\n # Then, friction / drag\n self.speed *= drag", "def increase_speed(self):\n self.state['speed_boost'] = True\n self.speed = self.maze.block_size / 8", "def set_cmd_velocity(self, speed):\n self.gripper_io.set_signal_value(\"speed_mps\", speed)", "def changespeed(self, x):\n self.change_x += x * self.velocity", "def increase_speed(self):\n self.ship_speed += self.speedup_scale\n self.bullet_speed += self.speedup_scale\n self.alien_speed += self.speedup_scale\n\n self.alien_points = int(self.alien_points * self.score_scale)", "def accelerate(self):\n\t\tself.velocity += self.direction * self.ACCELERATION", "def move(self, speed=1):\n self.set_motor(self.left_motor, 'left', speed)\n self.set_motor(self.right_motor, 'right', speed)\n time.sleep(0.5)", "def set_speed(self, speed):\r\n speed = float(speed)\r\n speed = int(round(speed * 27.7778))\r\n return self.send_command('speed %s' % speed)", "def speed(self, speed):\n self._speed = speed\n self._rotspeed = speed", "def changespeed(self, x1, y1):\n self.change_x += x1\n self.change_y += y1", "def adjustSpeed(self, speed):\n\t\tif self.timeout <= 0:\n\t\t\tself.speed = max(self.minimumSpeed, min(self.maximumSpeed, self.speed + speed))", "def set_speed():\n pass", "def __move__(self):\n v = self.velocity\n p = self.position\n p += v\n self.rect.x = round(p.x)\n self.rect.y = round(p.y)", "def set_speed(speed):\n if speed >255:\n speed =255\n elif speed <0:\n speed =0\n set_left_speed(speed)\n #time.sleep(.1)\n set_right_speed(speed)", "def increase_speed(self):\n self.ship_speed*=self.speedup_scale\n self.bullet_speed*=self.speedup_scale\n self.alien_speed*=self.speedup_scale\n self.alien_points=int(self.alien_points*self.score_scale)\n print(self.alien_points)", "def set_speed(self, speed):\n # create the MAV_CMD_DO_CHANGE_SPEED command\n msg = self.message_factory.command_long_encode(0, 0,mavutil.mavlink.MAV_CMD_DO_CHANGE_SPEED,0,0,speed,0, 0, 0, 0, 0)\n\n # send command to vehicle\n self.send_mavlink(msg)\n self.flush()", "def on_key_press(self, key, modifiers):\n if key == arcade.key.UP:\n self.player.change_y = MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.player.change_y = -MOVEMENT_SPEED\n elif key == arcade.key.LEFT:\n self.player.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player.change_x = MOVEMENT_SPEED", "def set_speed(self, ratio):\n self._speed = ratio", "def on_key_press(self, key, modifiers):\r\n if key == arcade.key.UP:\r\n self.player.change_y = MOVEMENT_SPEED\r\n elif key == arcade.key.DOWN:\r\n self.player.change_y = -MOVEMENT_SPEED\r\n elif key == arcade.key.LEFT:\r\n self.player.change_x = -MOVEMENT_SPEED\r\n elif key == arcade.key.RIGHT:\r\n self.player.change_x = MOVEMENT_SPEED", "def on_key_press(self, key, modifiers):\r\n if key == arcade.key.UP:\r\n self.player.change_y = MOVEMENT_SPEED\r\n elif key == arcade.key.DOWN:\r\n self.player.change_y = -MOVEMENT_SPEED\r\n elif key == arcade.key.LEFT:\r\n self.player.change_x = -MOVEMENT_SPEED\r\n elif key == 
arcade.key.RIGHT:\r\n self.player.change_x = MOVEMENT_SPEED", "def speed(self, s=0):", "def update(self):\n self.rect.y -= self.y_speed # Pawns move up the screen at the speed specified", "def move(self) -> None:\n self.delta_time += 1 / 30 # FPS is 30 frames per second\n\n if self.is_active:\n self.y -= self.velocity * self.delta_time + 0.5 * self.gravity * (self.delta_time ** 2) # s = ut + 0.5at^2\n self.velocity = self.velocity + self.gravity * self.delta_time # v = u + at\n\n # Limit the velocity to the terminal velocity\n self.velocity = max(self.terminal_velocity, self.velocity)\n\n # Limit the y-pos to within the top of the screen and the base\n self.y = min(max(0, self.y), BACKGROUND_SPRITE.get_height() - Base.Height - Bird.Height)\n\n # Animation\n # -e^-x graph is found suitable for the slow descent\n # The value of the function converges to -90 as x peaks out at 4.5\n # The value of the function converges to 0 as x becomes negative\n self.angle = -np.exp(self.velocity / self.terminal_velocity * 4.5) + (self.velocity > 0) * self.up_angle\n else:\n self.y = self.init_y + np.sin(self.delta_time * np.pi) * self.glide_height", "def move(self):\n self.pos += self.vel\n self.rect.center = self.pos", "def move(self):\r\n min_x = self.__screen.SCREEN_MIN_X\r\n min_y = self.__screen.SCREEN_MIN_Y\r\n delta_x = self.__screen.SCREEN_MAX_X - min_x\r\n delta_y = self.__screen.SCREEN_MAX_Y - min_y\r\n\r\n # new location formula according to pdf.\r\n new_x = (self.__x_speed + self.__x - min_x) % delta_x + min_x\r\n new_y = (self.__y_speed + self.__y - min_y) % delta_y + min_y\r\n self.__x, self.__y = new_x, new_y", "def update(self):\n\t\tself.y += (self.settings.target_speed * self.target_direction)\n\t\tself.rect.y = self.y", "def movement(self):", "def set_speed(self, speed):\n return self.bot_client.send_command(_Command.SetSpeed, speed)", "def set_animation_speed(self, speed):\n self.m_animation_speed = self.calculate_animation_speed(speed)", "def update(self):\n self.x += (self.ai_settings.alien_speed_factor *\n self.ai_settings.fleet_direction)\n self.rect.x = self.x", "def update(self):\n self.velocity = [math.cos(self.angle), - math.sin(self.angle)]\n self.velocity = [self.speed * i for i in self.velocity]\n\n super().update()", "def move(self):\n\t\tself.tick_count += 1\n\n\t\tself.y += self.height + self.vel*(self.tick_count/30) + 0.5*(9.8)*(self.tick_count)**2 # calculate displacement\n\n\t\tif self.vel > 0: # tilt down\n\t\t\tself.tilt -= 0.5\n\t\telse: # tilt up\n\t\t\tself.tilt += 0.5", "def move(self, direction, speed):\n self.motor_A(direction, speed)\n self.motor_B(direction, speed)", "def change_motor_speed(self, speed=0.0):\r\n if not self.enabled:\r\n self.set_neutral(braked=False)\r\n return\r\n\r\n # logging.info(\"{} Motor Speed: {}\".format(self.motor_name, speed))\r\n self.current_speed = speed # Store current set speed\r\n\r\n # If speed is < 0.0, we are driving in reverse.\r\n self.forward = True\r\n if speed < 0.0:\r\n # Normalise speed value to be in range [0, 100]\r\n speed = -speed\r\n # Store direction\r\n self.forward = False\r\n\r\n # Apply a factor to the speed to limit speed\r\n speed *= self.speed_factor\r\n\r\n # Set motor directional pins\r\n if self.forward:\r\n if self.a_pin >= 0:\r\n self.GPIO.output(self.a_pin, 1)\r\n if self.b_pin >= 0:\r\n self.GPIO.output(self.b_pin, 0)\r\n else:\r\n if self.a_pin >= 0:\r\n self.GPIO.output(self.a_pin, 0)\r\n if self.b_pin >= 0:\r\n self.GPIO.output(self.b_pin, 1)\r\n\r\n # Convert speed into PWM duty cycle\r\n # 
and clamp values to min/max ranges.\r\n dutycycle = speed\r\n if dutycycle < 0.0:\r\n dutycycle = 0.0\r\n elif dutycycle > self.max_speed:\r\n dutycycle = self.max_speed\r\n\r\n # Change the PWM duty cycle based on fabs() of speed value.\r\n self.PWM.ChangeDutyCycle(dutycycle)", "def increase_aliens_speed(self):\r\n self.alien_speed_factor += 0.01\r\n self.alien_bullet_speed_factor += 0.02", "def set_speed(self, speed: str) -> None:\n self.wink.set_state(True, speed)", "def move(self,dt):\n self.x_pos += self.x_vel*dt\n self.y_pos += self.y_vel*dt", "def setSpeed(self, v):\n\t\tconverted = self.convertSpeed(v)\n\t\tprint(converted)\n\t\t# set both stage speeds\n\t\tself.zaberSend(self.translation[\"hor\"], self.cmd[\"setTargetSpeed\"], data = converted)\n\t\tself.zaberSend(self.translation[\"ver\"], self.cmd[\"setTargetSpeed\"], data = converted)", "def set_speed(self,value):\n if (value>self.get_max_speed()):\n print \"asked to set the speed to %f but the max speed is %f\\n\" % (value,self.get_max_speed())\n else:\n return self.put_par(\"slew_speed\",value)", "def set_speed(self, v):\n self.v = v", "def update(self):\r\n # change in position -> velocity\r\n self.position += self.velocity\r\n # change in celocity -> acceleration\r\n self.velocity += self.acceleration\r\n \r\n # if velocity magnitude is higher than the defined limit set the velocity \r\n # magnitude to max speed\r\n if np.linalg.norm(self.velocity) > self.max_speed:\r\n self.velocity = self.velocity / np.linalg.norm(self.velocity) * self.max_speed\r\n \r\n # reset the acceleration\r\n self.acceleration = Vector(*np.zeros(2))", "def changeSpeed(self, speed, accel):\n\t\t\n max_speed = 1000\n min_speed = 0\n \n # limit max speed\n if speed >= max_speed:\n speed = max_speed\n \n # limit min speed\n if speed <= min_speed:\n speed = min_speed\n \n command = struct.pack(\"<BHHB\", 0x24, speed, accel, 0x01)\n self.sendCommand(command)", "def change_object(self, x, y, interval, time_to_crash):\n self.speed = round(math.sqrt((x - self.x1) ** 2 + (y - self.y1) ** 2) / interval)\n self.move_to(x, y)", "def set_speed(self, axis, speed):\n #log.info(f\"set speed {axis} {speed}\")\n self.cmd_axis_speed[axis] = speed", "def _update_speed(self, speed):\n if speed is None:\n return\n if speed == self._current_speed:\n return\n\n self._current_speed = speed\n self._update_speed_attributes()\n LOG.info(\n f\"Updated LUNOS {self._name}: {self.percentage}% {self._current_speed}\"\n )", "def set_speed_y(self, new_speed):\n self.__speed_y = new_speed", "def on_speed_change(self, event) -> None:\r\n\r\n speed_level = int(self.speed_scale.get())\r\n self.animator.time_per_gen = self.TIMES_PER_GEN[speed_level]", "def ChangeWindSpeed(self,speed):\n self.problem.ChangeWindSpeed(speed)", "def on_key_press(self, key, modifiers):\n\n if key == arcade.key.UP:\n self.player_sprite.change_y = MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.player_sprite.change_y = -MOVEMENT_SPEED\n elif key == arcade.key.LEFT:\n self.player_sprite.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player_sprite.change_x = MOVEMENT_SPEED", "def setMotorSpeed(self,velRight=500,velLeft=500):\n cmd = 'D,'+str(velRight)+','+str(velLeft)\n self.sendCmd(cmd)", "def accelerateForwards(self,movementSpeed=0.1):\n self.xMomentum+=math.sin(self.faceHeading*(math.pi/180))*movementSpeed\n self.yMomentum+=math.cos(self.faceHeading*(math.pi/180))*movementSpeed", "def turnspeed(self, rotspeed):\n self._rotspeed = rotspeed", "def set_speed(self, speed):\n assert 
isinstance(speed, float), \"Must be a float\"\n \n if speed < 0.0:\n raise ValueError(\"Negative speeds not supported\")\n \n self.speed = speed", "def set_move_speed(cls, quad):\n\n\t\tspeed = cls.get_address_value(quad.result)\n\t\treturn speed/1000.0", "def setMovement(self, movement, isSpecial = False, canControl = True):\n\n vel = self.ode_body.getLinearVel()\n for i in range(len(self.direction)):\n vel[i] = self.direction[i] * movement\n\n self.ode_body.setLinearVel(vel)\n\n self.moveVal = self.direction\n self.moveSpecial = isSpecial\n self.isMove = [False, False]\n self.direction = [self.moveVal[0], self.moveVal[1]]\n\n if not canControl:\n self.knockback()\n self.moveLock(None, 9999)\n self.isKnockback = True\n \n # Play Sound\n if movement > 10:\n self.sfx['lunge'].play()", "def step(self):\r\n\r\n self.velocity = 1\r\n new_pos = self.pos\r\n self.model.space.move_agent(self, new_pos)", "def move_car(self):\n a = self.h / 50\n self.x += self.speed_x / FPS\n if self.x + 170 * a >= 1100:\n self.dir = -1\n self.speed_x = -self.speed_x\n if self.x - 170 * a <= 50:\n self.dir = 1\n self.speed_x = -self.speed_x", "def accelerate(self):\n x_speed = self.__calc_speed(Ship._X)\n y_speed = self.__calc_speed(Ship._Y)\n self._speed_vect = (x_speed, y_speed)", "def move(self, linear_speed, angular_speed):\n twist = Twist()\n twist.linear.x = linear_speed\n twist.angular.z = angular_speed\n self.pub.publish(twist)", "def _move_actor(self, actor):\n\n actor.center_x = actor.center_x + actor.change_x\n actor.center_y = actor.center_y + actor.change_y", "def set_speed(self, speed):\n self.device.set_speed(speed)\n return \"OK\"", "def update(self):\r\n # Update the decimal position of the kame.\r\n self.y -= self.speed_factor\r\n # Update the rect position.\r\n self.rect.y = self.y", "def player_movement(self):" ]
[ "0.7516971", "0.7302032", "0.7290268", "0.7254367", "0.7236787", "0.7232281", "0.7176142", "0.7162881", "0.7147804", "0.7146328", "0.7095998", "0.7064958", "0.7046144", "0.69934225", "0.69770086", "0.6971563", "0.6969466", "0.69632304", "0.69601864", "0.6959585", "0.6946217", "0.69408303", "0.6908239", "0.6908239", "0.6908239", "0.69028145", "0.689167", "0.6842269", "0.6802566", "0.67831147", "0.67824954", "0.6777748", "0.67643", "0.67620313", "0.6747906", "0.6744122", "0.6692175", "0.6689062", "0.6676623", "0.66627514", "0.66428626", "0.6620402", "0.6619754", "0.66064763", "0.6604085", "0.6602377", "0.6585374", "0.65766853", "0.6576649", "0.6574732", "0.65735847", "0.6559291", "0.6544689", "0.6534292", "0.6527769", "0.6527769", "0.6525515", "0.65250653", "0.6517047", "0.6513207", "0.6511234", "0.6510269", "0.64996886", "0.6493035", "0.64841807", "0.64555", "0.6451715", "0.6438643", "0.64260817", "0.6424033", "0.64103323", "0.64080966", "0.6401036", "0.6397294", "0.6391932", "0.63908726", "0.6387311", "0.6385018", "0.63667953", "0.6359506", "0.6356688", "0.6356021", "0.63428247", "0.63384014", "0.63343275", "0.6332582", "0.63177323", "0.6310868", "0.62979615", "0.62952894", "0.6291625", "0.6290523", "0.6289138", "0.6281053", "0.6278569", "0.62737477", "0.626667", "0.6265549", "0.6263626" ]
0.6720854
37
Change the position of the agent to show movement
def update_position(self):
    self.position[0] += self.velocity[0]
    self.position[1] += self.velocity[1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move(self):\n \n self.position = self.wander()", "def movement(self):", "def move(self):\n possible_steps = self.model.grid.get_neighborhood(\n self.pos,\n moore=False, # implements Von Neumann neighborhood\n include_center=False)\n new_position = self.random.choice(possible_steps)\n self.heading = [new_position[0] - self.pos[0],\n new_position[1] - self.pos[1]]\n self.model.grid.move_agent(self, new_position)", "def move_agent(self, agent):\n id_ = agent.id_\n p = agent.mobility.current\n x, y = to_geometry(p[0]), to_geometry(p[1])\n print('move agent{} {} {}'.format(id_, x, y))\n print('move agentr{} {} {}'.format(id_, x, y))", "def move(self):\n \n self.position = self.explore()", "def step(self):\r\n\r\n self.velocity = 1\r\n new_pos = self.pos\r\n self.model.space.move_agent(self, new_pos)", "def move(self):\r\n min_x = self.__screen.SCREEN_MIN_X\r\n min_y = self.__screen.SCREEN_MIN_Y\r\n delta_x = self.__screen.SCREEN_MAX_X - min_x\r\n delta_y = self.__screen.SCREEN_MAX_Y - min_y\r\n\r\n # new location formula according to pdf.\r\n new_x = (self.__x_speed + self.__x - min_x) % delta_x + min_x\r\n new_y = (self.__y_speed + self.__y - min_y) % delta_y + min_y\r\n self.__x, self.__y = new_x, new_y", "def move():\n Robot.move()", "def move_tower(self, x, y):\n self.x = x\n self.y = y\n self.menu.x = x\n self.menu.y = y\n self.menu.update()", "def move(self,x,y):\n self.pos.x = x\n self.pos.y = y", "def joystick_move(self, emphasis=1):\n step = int(20*emphasis)\n self.display.ship.move_vertical(step=step)", "def move(self, p):\r\n self.position.setvalue(p)", "def moving(self,newX,newY):\n LOGGER.debug(\"{} moved to {} | {}\".format(self.physic_id,newX,newY))\n lazzyUpdate().sendTrame(self.physic_id,{\"coordX\":newX,\"coordY\":newY})", "def set_new_location(self, xPos, yPos):", "def movement(self):\n self.rect.left -= self.speedx #to move the asteroid to the left", "def player_movement(self):", "def move(self):\n pass", "def move(self) -> None:\n\n new_x = self.getX() + self.speed[0]\n new_y = self.getY() + self.speed[1]\n self.setX(new_x)\n self.setY(new_y)", "def move_to_position1(self):", "def reset_position(self): \n self.rect.x = 400\n self.rect.y = 400\n \n # Specifies the Player's spawnpoint as maze_arrangement[8][8], representing\n # the tile in the center of the maze \n self.__minotaur_x = 8\n self.__minotaur_y = 8", "def _move_actor(self, actor):\n\n actor.center_x = actor.center_x + actor.change_x\n actor.center_y = actor.center_y + actor.change_y", "def teleport(self, x, y):\n self.rect.x = x\n self.rect.y = y", "def automove(self):\n if self.x < self.end_cinematic_x_pos:\n self.x += self.SHIP_SPEED\n if self.x > self.end_cinematic_x_pos:\n self.x -= self.SHIP_SPEED\n if self.y < self.end_cinematic_y_pos:\n self.y += self.SHIP_SPEED\n if self.y > self.end_cinematic_y_pos:\n self.y -= self.SHIP_SPEED", "def move_to_position2(self):", "def move_turtle(self):\n self.forward(self.move_speed)", "def setPosition(self):\n # determine posX, posY for battle\n (x1,y1) = globals.battlemapQuadrants[self.systemGrid]\n self.posX = x1+self.setX\n self.posY = y1+self.setY", "def move(self) -> None:\n self.delta_time += 1 / 30 # FPS is 30 frames per second\n\n if self.is_active:\n self.y -= self.velocity * self.delta_time + 0.5 * self.gravity * (self.delta_time ** 2) # s = ut + 0.5at^2\n self.velocity = self.velocity + self.gravity * self.delta_time # v = u + at\n\n # Limit the velocity to the terminal velocity\n self.velocity = max(self.terminal_velocity, self.velocity)\n\n # Limit the 
y-pos to within the top of the screen and the base\n self.y = min(max(0, self.y), BACKGROUND_SPRITE.get_height() - Base.Height - Bird.Height)\n\n # Animation\n # -e^-x graph is found suitable for the slow descent\n # The value of the function converges to -90 as x peaks out at 4.5\n # The value of the function converges to 0 as x becomes negative\n self.angle = -np.exp(self.velocity / self.terminal_velocity * 4.5) + (self.velocity > 0) * self.up_angle\n else:\n self.y = self.init_y + np.sin(self.delta_time * np.pi) * self.glide_height", "def move_to_start(self):\n self.pos = (SCREEN_WIDTH / 2, SCREEN_HEIGHT - 64)", "def update(self):\n # Update the decimal position of the beam. \n self.x += self.settings.laser_speed\n # Update the rect position.\n self.rect.x = self.x", "def automove_to(self, x: int, y: int) -> None:\n self.cpu_controlled = True\n self.end_cinematic_x_pos = x\n self.end_cinematic_y_pos = y", "def move(self, X, Y):\n self.menu.x, self.menu.y = X, Y\n self.x, self.y = X, Y\n self.menu.update_buttons()", "def move(self):\n\n # get the location we WOULD go to\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n while (abs (newX) > self.BOX_RANGE) or (abs(newY) > self.BOX_RANGE):\n # print(\"choosing new direction... \",end=\"\")\n self.chooseNewDirection()\n # print(self.dx, self.dy)\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n\n # now move our monster\n super().move()", "def change_pos(self, direction):\n if direction == Direction.UP:\n self._y_pos -= 1\n elif direction == Direction.DOWN:\n self._y_pos += 1\n elif direction == Direction.LEFT:\n self._x_pos -= 1\n elif direction == Direction.RIGHT:\n self._x_pos += 1\n self._coordinates = self.coordinates()", "def move(self, agent, action):\n\t\tpass", "def move():\n print(\" ------ Execution -----\\n\")\n pyautogui.moveRel(0, 10)\n pyautogui.moveRel(0, -10)\n pyautogui.click()", "def table_move_update():\n pos = self.variables.table.get_current_position()\n self.table_move_ui.x_move.setProperty(\"value\", int(pos[0]))\n self.table_move_ui.y_move.setProperty(\"value\", int(pos[1]))\n self.table_move_ui.z_move.setProperty(\"value\", int(pos[2]))", "def move(self, direction):\n # replace with your code\n pass", "def move(self, direction):\n # replace with your code\n pass", "def move_me_on_spawn(self):\r\n\t\tif self.points_to_go:\r\n\t\t\tself.start_pos = self.points_to_go[0]\r\n\t\t\tfor point in self.points_to_go[1:]:\r\n\t\t\t\tfor i in range(len(self.points_to_go[1:])):\r\n\t\t\t\t\tself.goal_pos = self.points_to_go[i]\r\n\t\t\t\t\t\r\n\t\t\t\t\tself.move_me()\r\n\t\t\t\t\t#self.start_pos = \r\n\t\t\t\t\t#print(self.goal_pos)\r\n\t\t\t\t\t#if self.move_me():\r\n\t\t\t\t\t#\ti += 1\r\n\t\t\t\t\t#\tprint('switch')\r", "def advance_simulation(self):\n ts = self.sim.getTimeStep()\n old_rpos = self.sim.getAgentPosition(self.robot_num)\n # Move all the agents towards their goals\n for agent in self.agents:\n p = self.sim.getAgentPosition(agent)\n g = self.goals[agent]\n vec = ((g[0] - p[0])/ts, (g[1] - p[1])/ts)\n self.sim.setAgentPrefVelocity(agent, vec)\n self.sim.doStep()\n # Check and see if the robot is in collision and reset to old position\n # if so (backtrack on this step)\n # for agent in self.agents:\n # if (agent != self.robot_num\n # and utils.dist(self.sim.getAgentPosition(self.robot_num),\n # self.sim.getAgentPosition(agent)) < (\n # self.sim.getAgentRadius(self.robot_num) +\n # self.sim.getAgentRadius(agent))):\n # self.sim.setAgentPosition(self.robot_num, old_rpos)\n if self.file is 
not None:\n self.update_visualization()", "def move(self):\n if random.random() < 0.5:\n self.y = (self.y + 1) % 100\n else:\n self.y = (self.y - 1) % 100\n if random.random() < 0.5:\n self.x = (self.x + 1) % 100\n else:\n self.x = (self.x - 1) % 100", "def move(self):\n self.pos += self.vel\n self.rect.center = self.pos", "def move (self):\n\t\tself.x += self.direction[0]\n\t\tself.y += self.direction[1]\n\t\tself.row = (self.y - 15) / 30\n\t\tself.col = (self.x - 15) / 30\n\t\tself.rowcol = (self.row,self.col)\n\t\tself.draw()", "def move_to(self, x, y):\r\n self.__current_room = x, y", "def move(self, rel_pos):\n self.pos = (self.pos[0] + rel_pos[0] * GRID, self.pos[1] + rel_pos[1] * GRID)", "def move(self):\n if self.ycor() > 280: self.y_dir = -1 # Set vertical movement to down if ball at top of screen\n if self.xcor() > 380: self.x_dir = -1 # Set horizontal movement to left if ball at right of screen\n if self.xcor() < -380: self.x_dir = 1 # Set horizontal movement to right if ball at left of screen\n new_x = self.xcor() + self.x_dir * 2 # Define 2 spaces forward in set horizontal dir of travel\n new_y = self.ycor() + self.y_dir * 2 # Define 2 spaces forward in set vertical dir of travel\n self.goto(new_x, new_y) # Move ball to newly defined position", "def setMovement(self, movement):\n self.ma = movement", "def movement(self, screen):\n if self.tx is not None and self.ty is not None: # Target is set\n\n X = self.x - self.tx\n Y = self.y - self.ty\n\n if X < 0: # --->\n self.img = pygame.image.load(next(self.walking_east_images))\n self.x += self.velocity\n elif X > 0: # <----\n self.img = pygame.image.load(next(self.walking_west_images))\n self.x -= self.velocity\n if Y > 0: # up\n self.img = pygame.image.load(next(self.walking_north_images))\n self.y -= self.velocity\n elif Y < 0: # dopwn\n self.img = pygame.image.load(next(self.walking_south_images))\n self.y += self.velocity\n screen.blit(self.img, (self.x, self.y))\n\n if X == 0 and Y == 0:\n self.tx, self.ty = None, None\n self.agent.actionCompleted()", "def set_robot_pos(self):\n\t\tx,y,z = self.geo2desiredENU(self.curr_lat, self.curr_lon, self.gpsAlt)\n\t\tself.robot_msg.point.x = x\n\t\tself.robot_msg.point.y = y\n\t\tself.robot_msg.point.z = z", "def setPosition(position):", "def move( self, event ):\n self.lastMotion = time()\n if self.follow == False: # If the follow flag is not set, motion within the widget will make the ToolTip dissapear\n self.withdraw()\n self.visible = 1\n self.geometry( '+%i+%i' % ( event.x_root+10, event.y_root+10 ) ) # Offset the ToolTip 10x10 pixes southwest of the pointer\n try:\n self.msgVar.set( self.msgFunc() ) # Try to call the message function. 
Will not change the message if the message function is None or the message function fails\n except:\n pass\n self.after( int( self.delay * 1000 ), self.show )", "def _move(self, dx, dy):\n # horizontal velocity is dx, vertical velocity is dy\n self._player.set_velocity((dx, dy))", "def move(self):\n self.position += self.speed", "def move_friendly(self):\n self.friendly_pos[0]+=self.x_speed\n self.friendly_pos[1]+=self.y_speed", "def update(self):\n self.x += (self.settings.alien_speed * self.settings.fleet_direction)\n self.rect.x = self.x", "def move(x,y):\r\n pass", "def move(self, x, y):\n\n #log.info(\"MOVE x:%s y:%s\", x, y)", "def update(self):\r\n self.x += (self.invasion_settings.alien_speed *\r\n self.invasion_settings.fleet_direction)\r\n self.rect.x = self.x", "def move(self):\n # using a formula of axis coordinates and speed modulus delta of the\n # screen axis plus the minimal screen size\n self.x_coord = \\\n (self.x_speed + self.x_coord - Screen.SCREEN_MIN_X) % delta_x + \\\n Screen.SCREEN_MIN_X\n self.y_coord = \\\n (self.y_speed + self.y_coord - Screen.SCREEN_MIN_Y) % delta_y + \\\n Screen.SCREEN_MIN_Y", "def reset_movement(self):\n self.direction = [0, 0]", "def move_to(self, x, y):\n self.x = x\n self.y = y", "def move_to(self, x, y):\n self.x = x\n self.y = y", "def _move(self, pos):\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos", "def advance(self): \n self.center.x = self.center.x + self.velocity.dx\n self.center.y = self.center.y + self.velocity.dy", "def move(self, pos):\n self.widget.move(*pos)", "def move(self):\n \n # checks for bots nearby\n next_move = self.follow()\n \n # finds a random move if no bot\n if next_move is self.position:\n self.position = self.wander()\n else:\n self.position = next_move", "def _onmove(self, event):", "def AeroMove(self, pos):\r\n\r\n pass", "def move_turtle(self, x, y):\n tortuga = self.turtle\n if self.capture_mode:\n tortuga.setheading(tortuga.towards(x, y))\n tortuga.setpos(x, y)\n self.add_punto(Punto(x, y))", "def cambiovelocidad(self,x,y):\n self.change_x += x\n self.change_y += y", "def setPosition(self):\n self.data['pos-x'] = \"%s\" % self.x()\n self.data['pos-y'] = \"%s\" % self.y()", "def move(self):\n if self.x_pos < const.screenwidth:\n self.x_pos += 1\n self.x_pos = self.x_pos\n\n self.draw()\n return", "def do_move(self, dx, dy):\n self.rect.move_ip(dx, dy)", "def move_motion(event):\n if x is None or y is None:\n return\n window.geometry('+{x:g}+{y:g}'.format(\n x=window.winfo_x() + (event.x - x),\n y=window.winfo_y() + (event.y - y),\n ))", "def update(self):\n self.rect.y -= self.y_speed # Pawns move up the screen at the speed specified", "def move(self, model):\n grid = model.grid\n possible_steps = grid.get_neighborhood(\n self.pos, moore=True, include_center=True)\n choice = random.choice(possible_steps)\n grid.move_agent(self, choice)", "def setDesiredPosition(self, x, y):\n (self.setX, self.setY) = (x , y)", "def init_position_electrodes_screen(self):\n self.line_shoulder_pos_l.hide()\n self.line_shoulder_pos_r.hide()\n self.txt_shoulder_pos_r.hide()\n self.txt_shoulder_pos_r.hide()", "def updatePos(self):\n self.setPos(self.centerX-self.boundingRect().width()/2.0,\n self.centerY-self.boundingRect().height()/2.0)", "def execute_action(self, a):\n x,y = self.agent\n self.agent = self._get_new_position(x,y,a)", "def move(self):\n assert self.is_alive, 
\"Sprite is dead, and should not be able to move\"\n if self.health > 3:\n self.y += random.randint(-1, 1) # change by -1, 0, 1\n self.x += random.randint(-1, 1) # change by -1, 0, 1\n print(self.name, \"moves to position\", str(self.x), \",\", str(self.y))", "def update_movement(self):\n if self.way_idx < len(self.waypoints) and not self.moving_object.is_moving:\n self.moving_object.start_moving(self.waypoints[self.way_idx])\n self.way_idx += 1", "def adjust_mario_position(self):\n self.last_x_position = self.mario.rect.right\n self.mario.rect.x += round(self.mario.x_vel)\n self.check_mario_x_collisions()\n\n if self.mario.in_transition_state == False:\n self.mario.rect.y += round(self.mario.y_vel)\n self.check_mario_y_collisions()", "def set_position(self, x, y):\n self.pos = pygame.Rect(x, y, 0, 0)", "def move_me(self):\r\n\t\t#self.start_pos = self.rect.center\t\t\t\r\n\t\tif self.goal_pos is not None:\r\n\t\t\tprint(f'goal_pos: {self.goal_pos}, start_pos: {self.start_pos}')\r\n\t\t\tdx = self.goal_pos[0] - self.start_pos[0]\r\n\t\t\tdy = self.goal_pos[1] - self.start_pos[1]\r\n\r\n\t\t\tdistance = math.sqrt(dx*dx + dy*dy)\r\n\t\t\tself.shift += self.speed\r\n\r\n\t\ttry:\r\n\t\t\tif self.shift/distance < 0.99:\r\n\t\t\t\tself.rect.center = (self.start_pos[0] + self.shift/distance * dx,\r\n\t\t\t\t\t\t\t\t\t self.start_pos[1] + self.shift/distance * dy)\r\n\t\t\t\tprint(f'going to: {self.goal_pos}')\r\n\t\texcept ZeroDivisionError:\r\n\t\t\t\tpass\t\r\n\t\treturn True", "def begin_auto_moving(self, direction):\n self.direction = direction\n self.image_list = self.animation_dict[direction]\n self.state = 'automoving'\n self.x_vel = self.vector_dict[direction][0]\n self.y_vel = self.vector_dict[direction][1]\n self.move_timer = self.current_time", "def move(self, new_location):\n self.current_location = new_location", "def setPos(self, pos):\n self.cameraNode.setPos(pos)", "def move(self, coordinates, direction):\n pass", "def mouse_move(self, pos):\n if (self.setup_type == \"position\"):\n x, y = pos\n self.canvas.move(x, y)", "def move(self, event):\n self.lastMotion = time()\n # If the follow flag is not set, motion within the\n # widget will make the ToolTip disappear\n #\n if self.follow is False:\n self.withdraw()\n self.visible = 1\n\n # Offset the ToolTip 10x10 pixes southwest of the pointer\n self.geometry('+%i+%i' % (event.x_root+20, event.y_root-10))\n try:\n # Try to call the message function. 
Will not change\n # the message if the message function is None or\n # the message function fails\n self.msgVar.set(self.msgFunc())\n except:\n pass\n self.after(int(self.delay * 1000), self.show)", "def move_agent(self, state):\n m = self.m\n n = self.n\n\n cur_env = deepcopy(state.grid)\n cur_env[m, n] = 0\n action = self.choose_action(state)\n\n if action == 'Right':\n if n + 1 >= grid_size or cur_env[m][n+1] != 0:\n Rew = -2 # Reward -5 if we move into wall or another agent\n self.collisions += 1\n else:\n n += 1\n Rew = -0.1 # Reward -1 otherwise\n a = 0 # Action number\n elif action == 'Left':\n if n - 1 < 0 or cur_env[m][n-1] != 0:\n Rew = -2\n self.collisions += 1\n else:\n n -= 1\n Rew = -0.1\n a = 1\n elif action == 'Up':\n if m - 1 < 0 or cur_env[m-1][n] != 0:\n Rew = -2\n self.collisions += 1\n else:\n m -= 1\n Rew = -0.1\n a = 2\n elif action == 'Down':\n if m + 1 >= grid_size or cur_env[m+1][n] != 0:\n Rew = -2\n self.collisions += 1\n else:\n m += 1\n Rew = -0.1\n a = 3\n\n m = m % grid_size\n n = n % grid_size\n self.m = m # Update position of agent\n self.n = n # Update position of agent\n cur_env[m][n] = 1 # Update grid\n new_state = State(cur_env, [m, n]) # Set new state\n terminal = False\n\n if [m, n] == self.end:\n Rew = 10\n terminal = True\n self.carry = True\n\n return new_state, a, Rew, terminal", "def specific_reset(self) -> None:\n self.old_velocity = 0.\n self.agent.specific_reset()\n max_dist_to_origin = 4.\n min_dist_to_origin = 2\n\n agent_pos = np.random.uniform(-max_dist_to_origin, max_dist_to_origin, 2)\n positioning_done = False\n while not positioning_done:\n agent_pos = np.random.uniform(-max_dist_to_origin,\n max_dist_to_origin, 2)\n if min_dist_to_origin <= np.linalg.norm(agent_pos) <= max_dist_to_origin:\n positioning_done = True\n\n # adjust the height of agent\n agent_pos = np.concatenate((agent_pos[:2], [self.agent.init_xyz[2]]))\n self.agent.set_position(agent_pos)\n\n # set agent orientation in forward run direction\n y = angle2pos(self.agent.get_position(), np.zeros(3)) + np.pi / 2\n y += self.agent.init_rpy[2]\n quaternion = self.bc.getQuaternionFromEuler([0, 0, y])\n self.agent.set_orientation(quaternion)", "def move(self, x, y):\n\n\t\tself._window.move(x, y)", "def _move(self):\n self.pos += self.direction # add direction vector\n self.direction += self.gravity # add gravity to direction\n self.direction = self.direction.elementwise() * self.drag # apply drag to direction", "def update(self):\n super().update()\n if self.center_y > TOP_LIMIT:\n self.center_y = BOTTOM_LIMIT\n if self.center_y < BOTTOM_LIMIT:\n self.center_y = TOP_LIMIT\n\n if self.center_x < 250:\n self.change_x = (0.2) * OBJECTS_SPEED\n elif self.center_x > SCREEN_WIDTH - 250:\n self.change_x = (-0.2) * OBJECTS_SPEED", "def update(self):\n self.x += (self.ai_settings.alien_speed_factor *\n self.ai_settings.fleet_direction)\n self.rect.x = self.x", "def update(self):\n\t\tself.x += (self.ai_settings.alien_speed_factor * \n\t\t\t\t\t\t\t\t\tself.ai_settings.fleet_direction)\n\t\tself.rect.x = self.x", "def move(self, direction):\n pass", "def set_position(self, x, y):\n self.position.x = x\n self.position.y = y\n self.rect.topleft = x, y", "def set_drone_position(self, new_point):\n self.drone.set_drone_position(new_point)" ]
[ "0.7571383", "0.7284361", "0.7184241", "0.7150989", "0.71362656", "0.7057185", "0.7012484", "0.6970319", "0.6948752", "0.688544", "0.6796978", "0.6755326", "0.674048", "0.6710682", "0.6673721", "0.66386414", "0.66232866", "0.66104656", "0.6609451", "0.6608374", "0.6564065", "0.6551069", "0.6545067", "0.65307933", "0.65252817", "0.6507886", "0.64947516", "0.64941454", "0.6484231", "0.6483183", "0.64780456", "0.6467434", "0.6464542", "0.6463345", "0.6456308", "0.64486235", "0.6442461", "0.6442461", "0.6439102", "0.6431964", "0.6430222", "0.6422986", "0.642277", "0.64215314", "0.6421098", "0.6397749", "0.6388393", "0.6374386", "0.6366071", "0.63642067", "0.63593566", "0.6348967", "0.6341233", "0.6340287", "0.63367665", "0.63236684", "0.6318202", "0.6305892", "0.63024694", "0.63014746", "0.6299447", "0.6299447", "0.62881815", "0.62855184", "0.62755555", "0.6274685", "0.62734634", "0.6270945", "0.6267646", "0.6263625", "0.6260469", "0.62597483", "0.62573904", "0.62533134", "0.6252613", "0.62478405", "0.6247202", "0.62344426", "0.62310904", "0.62295294", "0.62269866", "0.622685", "0.6224423", "0.6219828", "0.6218643", "0.6218586", "0.6214305", "0.6212481", "0.6208933", "0.6205176", "0.62010485", "0.6200695", "0.6195318", "0.61934483", "0.6192403", "0.6192132", "0.6191337", "0.6190737", "0.61898017", "0.61893916", "0.6188987" ]
0.0
-1
Disappear on horizontal collision and bounce on vertical
def is_collided_vertical(self):
    # bounce of vertical borders -> y-axis-check
    if self.position[1] <= config['globals']['BALL_RADIUS']:
        self.velocity[1] *= -1
    elif self.position[1] >= config['globals']['HEIGHT'] + 1 - config['globals']['BALL_RADIUS']:
        self.velocity[1] *= -1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_collide(self):\r\n if self.get_overlapping_sprites():\r\n self.dx = -self.dx", "def _alienCollide(self):\n for b in self._bolts:\n if self._ship != None and self._ship.collides(b):\n self._ship = None\n self._bolts = []\n self._key = False\n self._lives -= 1", "def on_collision(self):", "def collide(self, pos):\n\t\tpass", "def check_bounce(self):\n if self.ball.center.x < 0 and self.ball.velocity.dx < 0:\n self.ball.bounce_horizontal()\n\n if self.ball.center.y < 0 and self.ball.velocity.dy < 0:\n self.ball.bounce_vertical()\n\n if self.ball.center.y > SCREEN_HEIGHT and self.ball.velocity.dy > 0:\n self.ball.bounce_vertical()", "def _on_collision(self, event):\n self.collided = event", "def _shipCollision(self):\r\n for bolt in self._bolts:\r\n if self._ship.collides(bolt):\r\n self._ship = None\r\n if not self._shipexplode is None:\r\n self._shipexplode.play()\r\n self._bolts.remove(bolt)\r\n self._lives -= 1\r\n return", "def _shipCollide(self):\n for s in range(self.getLengthAlien()):\n for t in range(len(self._aliens[0])):\n for b in self._bolts:\n if self._aliens[s][t] != None and + \\\n self._aliens[s][t].collides(b):\n self._aliens[s][t] = None\n self._bolts.remove(b)\n self._key = False", "def bounce(self):\n \n if self.x > width - self.size:\n self.x = 2*(width - self.size) - self.x\n self.angle = self.angle * -1\n self.speed *= elasticity # Added to all to account for elasticity\n elif self.x < self.size:\n self.x = 2*self.size - self.x\n self.angle = self.angle * -1\n self.speed *= elasticity\n\n if self.y > height - self.size:\n self.y = 2*(height - self.size) - self.y\n self.angle = math.pi - self.angle\n self.speed *= elasticity\n elif self.y < self.size:\n self.y = 2*self.size - self.y\n self.angle = math.pi - self.angle\n self.speed *= elasticity", "def _bounce(self):\n right = self.surface.get_width() - self.size\n left = self.size\n top = self.size\n bottom = self.surface.get_height() - self.size\n if self.pos.x > right: # right border\n self.pos.x = right\n self.direction = self.direction.elementwise() * pygame.Vector2(-1.0, 1.0)\n elif self.pos.x < left: # left border\n self.pos.x = left\n self.direction = self.direction.elementwise() * pygame.Vector2(-1.0, 1.0)\n if self.pos.y > bottom: # bottom border\n self.pos.y = bottom\n self.direction = self.direction.elementwise() * pygame.Vector2(1.0, -1.0)\n elif self.pos.y < top: # top border\n self.pos.y = top\n self.direction = self.direction.elementwise() * pygame.Vector2(1.0, -1.0)", "def sidebounce(self):\r\n self.dx=-self.dx", "def collision(self, other: \"entities.Entity\") -> None:\n\n # Deactivation of bullet on collision with other entity strictly\n # speaking isn't related to the bullet movement component. 
Argument\n # could be made that this functionality ought to be places in a separate\n # component.\n self.container.active = False", "def collision_4():\r\n tu.reset()\r\n print(\"collision_4\")\r\n r = 100\r\n b1 = Ball2D(r=r, x=r, y=tbl.y_min, vy=tbl.v_max, color=\"blue\")\r\n b2 = Ball2D(r=r, x=0, y=tbl.y_max, vy=-tbl.v_max, color=\"red\")\r\n bc = BallCollision2D(balls=[b1, b2])\r\n max_r_sq = tbl.x_max**2 + tbl.y_max**2\r\n while (b1.x**2 + b1.y**2 < max_r_sq\r\n or b1.x**2 + b1.y**2 < max_r_sq):\r\n bc.ball_display()\r\n bc.ball_collision_update()\r\n time.sleep(t_update)\r\n bc.ball_update()\r\n if clear_at_end:\r\n bc.reset()", "def __handle_wall_collision(self):\n if self.__ball.x <= 0 or self.__ball.x + self.__ball.width >= self.__window.width:\n self.__dx = - self.__dx\n\n next_target_top = self.__window.get_object_at(self.__ball.x + self.__dx*1.5, self.__ball.y + self.__dy*1.5)\n next_target_bot = self.__window.get_object_at(self.__ball.x + self.__ball.width + self.__dx*1.5,\n self.__ball.y + self.__ball.height + self.__dy*1.5)\n\n if self.__hit_paddle(next_target_top) or self.__hit_paddle(next_target_bot):\n self.__dy = - abs(self.__dy)\n if self.__ball.x <= self.__paddle.x + 20:\n # The ball will fly left if hit the left of the paddle\n self.__dx = - abs(self.__dx)\n elif self.__ball.x > self.__paddle.x + self.__paddle.width - 20:\n # The ball will fly right if hit the right of the paddle\n self.__dx = abs(self.__dx)\n elif self.__hit_bricks(next_target_top) or self.__hit_bricks(next_target_bot):\n target_brick = next_target_top if next_target_top else next_target_bot\n self.__remove_brick(target_brick)\n self.__dy = - self.__dy\n elif self.__ball.y <= 0:\n self.__dy = - self.__dy\n elif self.__ball.y + self.__ball.height >= self.__window.height:\n self.__num_lives -= 1\n self.__playing = False\n self.__set_ball_position()\n self.__set_paddle_position()\n self.__set_ball_velocity()\n self.__set_record_board()", "def collide(self):\n\n # Kill projectile when it hits a screen edge\n if not self.rect.colliderect(self.scene.screen.get_rect()):\n self.kill()\n\n # Kill projectile when it hits a character, and also damage character.\n for char in self.scene.chars:\n if self.rect.colliderect(char.rect):\n char.damage(self.power)\n self.damage()", "def check_collision(self):\n if self.window.get_object_at(self.ball.x,self.ball.y+self.radius*2) is self.paddle:\n self.bounce()\n if self.window.get_object_at(self.ball.x+self.radius*2,self.ball.y+self.radius*2) is self.paddle:\n self.bounce()", "def collision_1():\r\n tu.reset()\r\n print(\"collision_1\")\r\n r = 100\r\n b1 = Ball2D(r=r, x=0, y=tbl.y_min, vy=tbl.v_max, color=\"blue\")\r\n b2 = Ball2D(r=r, x=0, y=0, color=\"red\")\r\n bc = BallCollision2D(balls=[b1,b2])\r\n while (b2.x**2 + b2.y**2 < max_r_sq):\r\n bc.ball_display()\r\n bc.ball_collision_update()\r\n time.sleep(t_update)\r\n bc.ball_update()\r\n if clear_at_end:\r\n bc.reset()", "def collide(self, xvel, yvel,\n space_mask_right, space_mask_left, space_mask_up, space_mask_bottom,\n reverse_x=False):\n for game_object in pygame.sprite.spritecollide(self, game_objects, False,\n collided=pygame.sprite.collide_mask):\n if isinstance(self, Player): # Если экземпляр класса основной игрок\n if game_object.collision and game_object.collision_do_kill:\n if not self.damage_mode and not self.death_mode:\n self.lives -= 1\n if self.lives > 0:\n self.damage()\n else:\n self.death()\n self.visible_hearts()\n if isinstance(game_object, Heart): # Если игрок столкнулся с бонусной жизнью\n 
self.lives += 1\n game_object.kill()\n self.visible_hearts()\n if isinstance(game_object, CheckPoint): # Если игрок столкнулся с чек-поинтом\n camera.set_memory(0, 0)\n width, height = game_object.rect.size\n image = game_object.image\n for y in range(width):\n for x in range(height):\n pixel = image.get_at((x, y))\n if pixel.a != 0:\n image.set_at((x, y), (100, 100, 100, 200))\n game_object.mask = pygame.mask.Mask((width, height), fill=0)\n game_object.collision = False\n if isinstance(game_object, Key): # Если игрок столкнулся с ключом от двери\n game_object.kill()\n self.key = True\n self.visible_key()\n if isinstance(game_object, Door): # Если игрок столкнулся с дверью\n if self.key:\n self.finish = True\n if isinstance(game_object, Coin): # Если игрок столкнулся с монетой\n self.coins += 1\n game_object.kill()\n if isinstance(game_object, Crystal): # Если игрок столкнулся с кристаллом\n self.crystals += 1\n game_object.kill()\n if isinstance(game_object, ButtonJump): # Если игрок столкнулся с батутом\n if yvel:\n self.yvel = -JUMP_POWER * JUMP_BOOST\n return\n if isinstance(game_object, Stairs): # Если игрок столкнулся с лестницей\n keys_status = pygame.key.get_pressed()\n if keys_status[pygame.K_w]:\n self.yvel = -MOVE_SPEED\n elif keys_status[pygame.K_s]:\n self.yvel = MOVE_SPEED\n else:\n self.yvel = 0\n if pygame.key.get_mods() & pygame.KMOD_LSHIFT:\n self.yvel *= SPEED_UP_BOOST\n self.on_stairs = True\n if not game_object.collision_player: # Если у объекта отключена коллизия с игроком\n continue\n else: # Если экземпляр класса основной противник\n if not game_object.collision_enemy: # Если у объекта отключена коллизия с врагом\n continue\n\n if reverse_x and xvel != 0:\n self.xvel = -self.xvel\n if xvel > 0:\n self.rect.right = game_object.rect.left + space_mask_right - 1\n if xvel < 0:\n self.rect.left = game_object.rect.right - space_mask_left + 1\n if yvel > 0:\n self.rect.bottom = game_object.rect.top + space_mask_bottom - 1\n self.on_ground = True\n self.yvel = 0\n if yvel < 0:\n self.rect.top = game_object.rect.bottom - space_mask_up + 1\n self.yvel = 0", "def _collide(self):\n\n collisions = self._get_collisions()\n for collision in collisions:\n self._update_excitation(collision)\n atom1 = self.atoms[collision[0]]\n atom2 = self.atoms[collision[1]]\n\n r = atom1.pos-atom2.pos\n r_mag = np.linalg.norm(r)\n r_hat = r/r_mag\n\n v_1_r = np.dot(atom1.vel, r_hat)\n v_2_r = np.dot(atom2.vel, r_hat)\n\n v_1_r_f = (atom1.mass-atom2.mass)*v_1_r/(atom1.mass + atom2.mass)\\\n + 2*atom2.mass*v_2_r/(atom1.mass + atom2.mass)\n v_2_r_f = (atom2.mass-atom1.mass)*v_2_r/(atom1.mass + atom2.mass)\\\n + 2*atom1.mass*v_1_r/(atom1.mass + atom2.mass)\n\n delta_v_1 = (v_1_r_f - v_1_r)*r_hat\n delta_v_2 = (v_2_r_f - v_2_r)*r_hat\n\n self.atoms[collision[0]].vel += delta_v_1\n self.atoms[collision[1]].vel += delta_v_2", "def absorb(self, collision):\n # Calculate total momentum and mass\n mass = sum(self.cells[ele].area() for ele in collision)\n px = sum(self.cells[ele].area() * self.cells[ele].veloc[0] for ele in collision)\n py = sum(self.cells[ele].area() * self.cells[ele].veloc[1] for ele in collision)\n # Determine the biggest cell\n collision.sort(key = lambda ele: self.cells[ele].radius)\n biggest = collision.pop()\n self.cells[biggest].radius = (mass / math.pi) ** 0.5\n self.cells[biggest].veloc[0] = px / mass\n self.cells[biggest].veloc[1] = py / mass\n for ele in collision:\n self.cells[ele].dead = True", "def _wall_bounce(self):\n\n if (self.pos[0] < self.rad):\n self.vel[0] = 
abs(self.vel[0])\n elif (self.pos[0] > self.disp_size[0]-self.rad):\n self.vel[0] = -abs(self.vel[0])\n if (self.pos[1] < self.rad):\n self.vel[1] = abs(self.vel[1])\n elif (self.pos[1] > self.disp_size[1]-self.rad):\n self.vel[1] = -abs(self.vel[1])", "def detect_collision():\n # with the top and bottom of screen\n if ball.ycor() > GAME_Y_BARRIER or ball.ycor() < -GAME_Y_BARRIER:\n ball.bounce_y()\n # with the paddles\n if ball.distance(paddle_right) < 50 and ball.xcor() > GAME_X_BARRIER \\\n or ball.distance(paddle_left) < 50 and ball.xcor() < -GAME_X_BARRIER:\n ball.bounce_x()", "def check_collide(self):\n for pizza in self.overlapping_sprites:\n pizza.handle_collide()\n pizza.destroy", "def collide(self, obj):\n if obj.type == 'food':\n if self.carry_food == False:\n self.r += 1\n self.color = (0, 255, 0, 128)\n self.carry_food = True\n if self.mass > obj.mass:\n pass\n elif self.mass*5 < obj.mass:\n self.orientation += math.pi\n if (obj.type == 'wall') and (obj.colony):\n if self.carry_food == True:\n self.r += 100\n self.carry_food = False\n self.color = (128, 128, 128, 128)\n else:\n pass\n # self.orientation += rnd.choice([math.pi / 4, - math.pi / 4])\n # else:\n # print('collision')\n # self.orientation += math.pi", "def collision_2():\r\n tu.reset()\r\n print(\"collision_2\")\r\n r = 100\r\n b1 = Ball2D(r=r, x=r, y=tbl.y_min, vy=tbl.v_max, color=\"blue\")\r\n b2 = Ball2D(r=r, x=0, y=0, color=\"red\")\r\n bc = BallCollision2D(balls=[b1, b2])\r\n while (b1.x**2 + b1.y**2 < max_r_sq\r\n or b2.x**2 + b2.y**2 < max_r_sq):\r\n bc.ball_display()\r\n bc.ball_collision_update()\r\n time.sleep(t_update)\r\n bc.ball_update()\r\n if clear_at_end:\r\n bc.reset()", "def detectWallCollision(self): \n if self.posn_x > cw - self.ball_width: # Collision with right-hand container wall. \n self.velocity_x = -self.velocity_x * self.coef_restitution # reverse direction. \n self.posn_x = cw - self.ball_width * 1.1 # anti-stick to the wall \n if self.posn_x < 1: # Collision with left-hand wall. \n self.velocity_x = -self.velocity_x * self.coef_restitution \n self.posn_x = 2 # anti-stick to the wall \n if self.posn_y < self.ball_height: # Collision with ceiling. \n self.velocity_y = -self.velocity_y * self.coef_restitution \n self.posn_y = self.ball_height * 1.1 # ceiling collision anti-stick \n if self.posn_y > ch - self.ball_height * 1.1 : # Floor collision. \n self.velocity_y = - self.velocity_y * self.coef_restitution \n self.posn_y = ch - self.ball_height * 1.1 # anti-stick. 
Prevents out-of-bounds ball loss (stickiness) ", "def _check_collisions(self):\n\t\tif pygame.sprite.spritecollide(\n\t\t\tself.bolan, \n\t\t\tself.obstacles.obstacles,\n\t\t\tFalse, \n\t\t\tpygame.sprite.collide_mask):\n\t\t\t\tself.is_play = False\n\t\t\t\tself.is_gameover = True\n\t\t\t\tself.bolan.image = self.settings.bolan_dead_image", "def update(self):\r\n if self.__var__ == 'right':\r\n self.rect.x -= self.speedy\r\n elif self.__var__ == 'left':\r\n self.rect.x += self.speedy\r\n elif self.__var__ == 'up':\r\n self.rect.y += self.speedy\r\n elif self.__var__ == 'down':\r\n self.rect.y -= self.speedy\r\n if self.rect.x > WIDTH or self.rect.x < 0 or self.rect.y > HEIGHT or self.rect.y < 0:\r\n ammo.remove(self)", "def update(self):\r\n if self.right > games.screen.width: #or self.left < 0:\r\n self.dx = -self.dx\r\n\r\n if self.bottom > games.screen.height or self.top < 0:\r\n self.dy = -self.dy\r\n\r\n self.handle_collide()\r\n\r\n if self.left < 60:\r\n self.end_game()", "def update(self, event):\n self.check_collision()\n self.player.animate()\n self.ghost.animate()", "def process_collision(self, obj, target):\n if obj == \"small_hex\" and not self.smallhex.small_hex_flag:\n self.ship.velocity.x = -self.ship.velocity.x\n self.ship.velocity.y = -self.ship.velocity.y\n self.gameevents.add(\"score-\", \"pnts\", self.config['Score']['small_hex_penalty'])\n self.gameevents.add(\"score-\", \"flight\", self.config['Score']['small_hex_penalty'])\n self.smallhex.small_hex_flag = True\n elif obj == \"shell\":\n #remove shell, target is index of shell in shell_list\n del self.shell_list[target]\n self.gameevents.add(\"score-\", \"pnts\", self.config['Score']['shell_hit_penalty'])\n self.gameevents.add(\"score-\", \"fortress\", self.config['Score']['shell_hit_penalty'])\n self.ship.take_damage()\n if not self.ship.alive:\n self.gameevents.add(\"destroyed\", \"ship\", \"shell\")\n self.gameevents.add(\"score-\", \"pnts\", self.config['Score']['ship_death_penalty'])\n self.gameevents.add(\"score-\", \"fortress\", self.config['Score']['ship_death_penalty'])\n self.ship.color = (255, 255, 0)\n elif self.config['Ship']['colored_damage']:\n g = 255 / self.ship.start_health * (self.ship.health - 1)\n self.ship.color = (255, g, 0)\n\n elif obj.startswith(\"missile_\"):\n #if missile hits fortress, need to check if it takes damage when mine is onscreen\n if target == \"fortress\" and (len(self.mine_list) == 0 or self.config['Fortress']['hit_fortress_while_mine']):\n if self.ship.shot_timer.elapsed() >= self.config['Fortress']['vlner_time']:\n self.gameevents.add(\"score+\", \"vlner\", 1)\n if self.ship.shot_timer.elapsed() < self.config['Fortress']['vlner_time'] and self.score.vlner >= self.config['Fortress']['vlner_threshold']:\n self.gameevents.add(\"destroyed\", \"fortress\")\n self.fortress.alive = False\n #r = choice([0,45,90,135,180,225,270,315])\n #if r:\n # self.explosion.rotate(r)\n self.fortress.reset_timer.reset()\n self.snd_explosion.play()\n self.gameevents.add(\"score+\", \"pnts\", self.config['Score']['destroy_fortress'])\n self.gameevents.add(\"score+\", \"fortress\", self.config['Score']['destroy_fortress'])\n self.score.vlner = 0\n self.destroyedFortresses += 1\n self.gameevents.add(\"reset\", \"VLNER\")\n #do we reset the mine timer?\n if self.config['Mine']['fortress_resets_mine']:\n self.mine_list.timer.reset()\n self.mine_list.flag = False\n elif self.ship.shot_timer.elapsed() < self.config['Fortress']['vlner_time'] and self.score.vlner < 
self.config['Fortress']['vlner_threshold']:\n self.gameevents.add(\"reset\", \"VLNER\")\n self.score.vlner = 0\n self.snd_vlner_reset.play()\n self.ship.shot_timer.reset()\n elif target.startswith(\"mine_\"):\n #deal with missile hitting mine\n #can the mine be hit?\n if len(self.mine_list) > 0:\n if self.mine_list[0].tagged == \"fail\":\n self.gameevents.add(\"collide\", \"fail_tagged_mine\")\n elif self.mine_list[0].tagged == \"disabled\":\n self.gameevents.add(\"collide\", \"disable_tagged_mine\")\n elif self.mine_list[0].tagged == \"untagged\":\n if self.score.iff in self.mine_list.foe_letters:\n self.mine_list[0].tagged = \"disable\"\n self.gameevents.add(\"collide\", \"untagged_foe_mine\")\n else:\n self.gameevents.add(\"collide\", \"friend_mine\")\n elif self.mine_list[0].tagged == \"tagged\" and self.score.iff in self.mine_list.foe_letters:\n self.gameevents.add(\"collide\", \"tagged_foe_mine\")\n elif obj.startswith(\"mine_\"):\n #mine hit the ship\n index = int(obj[-1])\n #check to see if mine is still alive, it is possible to shot and\n #collide with a mine at the same time, ties go to ship\n if index < len(self.mine_list):\n del self.mine_list[index]\n self.score.iff = ''\n self.score.intrvl = 0\n self.mine_list.flag = False\n self.mine_list.iff_flag = False\n self.mine_list.timer.reset()\n self.gameevents.add(\"score-\", \"pnts\", self.config['Score']['mine_hit_penalty'])\n self.gameevents.add(\"score-\", \"mines\", self.config['Score']['mine_hit_penalty'])\n self.mine2 -= self.config['Score']['mine_hit_penalty']\n self.ship.take_damage()\n if not self.ship.alive:\n self.gameevents.add(\"destroyed\", \"ship\", \"mine\")\n self.gameevents.add(\"score-\", \"pnts\", self.config['Score']['ship_death_penalty'])\n self.gameevents.add(\"score-\", \"mines\", self.config['Score']['ship_death_penalty'])\n self.mine2 -= self.config['Score']['ship_death_penalty']\n self.ship.color = (255, 255, 0)\n elif self.config['Ship']['colored_damage']:\n g = 255 / self.ship.start_health * (self.ship.health - 1)\n self.ship.color = (255, g, 0)\n elif obj == \"friend_mine\":\n #get rid of mine\n self.destroyedMines += 1\n self.mine_list.flag = False\n self.mine_list.iff_flag = False\n self.gameevents.add(\"score+\", \"mines\", self.config['Score']['energize_friend'])\n self.gameevents.add(\"score+\", \"pnts\", self.config['Score']['energize_friend'])\n #see how long mine has been alive. 0-100 points if destroyed within 10 seconds\n self.gameevents.add(\"score+\", \"mines\", 100 - 10 * math.floor(self.mine_list.timer.elapsed() / 1000))\n self.gameevents.add(\"score+\", \"speed\", 100 - 10 * math.floor(self.mine_list.timer.elapsed() / 1000))\n #print self.mine_list.timer.elapsed()\n #print 100 - 10 * math.floor(self.mine_list.timer.elapsed()/1000)\n self.mine_list.timer.reset()\n self.mine2 += 50\n #amazingly, missile can hit the mine in the same frame as the mine hits the ship\n if len(self.mine_list) > 0:\n del self.mine_list[0]\n self.score.iff = ''\n self.score.intrvl = 0\n elif obj == \"tagged_foe_mine\":\n #get rid of mine\n self.destroyedMines += 1\n self.mine_list.flag = False\n self.mine_list.iff_flag = False\n self.gameevents.add(\"score+\", \"mines\", self.config['Score']['destroy_foe'])\n self.gameevents.add(\"score+\", \"pnts\", self.config['Score']['destroy_foe'])\n #see how long mine has been alive. 
0-100 points if destroyed within 10 seconds\n self.gameevents.add(\"score+\", \"mines\", 100 - 10 * math.floor(self.mine_list.timer.elapsed() / 1000))\n self.gameevents.add(\"score+\", \"speed\", 100 - 10 * math.floor(self.mine_list.timer.elapsed() / 1000))\n self.mine_list.timer.reset()\n self.mine2 += 75\n if len(self.mine_list) > 0:\n del self.mine_list[0]\n self.score.iff = ''\n self.score.intrvl = 0", "def handle_collide(self):\n self.x = fuck.randrange(games.screen.width)\n self.y = fuck.randrange(games.screen.height)", "def handle_collide(self):\n self.x = fuck.randrange(games.screen.width)\n self.y = fuck.randrange(games.screen.height)", "def handle_collide(self):\n self.x = fuck.randrange(games.screen.width)\n self.y = fuck.randrange(games.screen.height)", "def on_hit(self, event, data):\n world, player = data\n # Ensure the top of the bounce block is being hit\n if get_collision_direction(player, self) == \"A\":\n self._active = True\n player.set_velocity((0, -3*player.get_max_velocity())) # bounce the player\n player.set_jumping(False) # player can't jump while bounced\n player.set_bounced(True)", "def update(self):\n if self.left < 0 or self.right > games.screen.width:\n self.dx = -self.dx\n elif random.randrange(self.odds_change) == 0:\n self.dx = -self.dx\n self.collided = False \n self.check_collide()", "def handle_vertical_collision(self, level):\n collisions = pygame.sprite.spritecollide(self, level.terrain, False)\n if collisions:\n self.height = 4\n self.gravity -= 0.1\n for collision in collisions:\n # Collision with ceilings\n if self.velocity.y < 0 and self.rect.top > collision.rect.bottom:\n self.rect.top = collision.rect.bottom\n self.velocity.y = 0\n self.height = 0\n # Odd conditionals, but seems to fix fireball clipping through floor\n elif ((self.velocity.y > 0 and self.rect.bottom < collision.rect.top)\n or collision.rect.collidepoint(self.rect.center)):\n self.rect.bottom = collision.rect.top", "def collide(b1, b2):\n dx = b1.x - b2.x\n dy = b1.y - b2.y\n\n distance = math.hypot(dx, dy)\n \n if distance < b1.size + b2.size: # If they have collided\n tangent = math.atan2(dy, dx) # Find the tangent of the point\n angle = 0.5 * math.pi + tangent # We use this later on\n b1.angle = 2*tangent - b1.angle # Alter angles\n b2.angle = 2*tangent - b2.angle\n (b1.speed, b2.speed) = (b2.speed, b1.speed) # Swap speeds\n b1.speed *= elasticity # Reduce speed due to elasticity\n b2.speed *= elasticity\n\n b1.x += math.sin(angle) # Move particles away from each other\n b1.y -= math.cos(angle)\n b2.x -= math.sin(angle)\n b2.y += math.cos(angle)", "def update():\n move()\n check_collision()", "def hit_object(self):\n obj1 = self.window.get_object_at(self.ball.x, self.ball.y)\n obj2 = self.window.get_object_at(self.ball.x, self.ball.y + BALL_RADIUS*2)\n obj3 = self.window.get_object_at(self.ball.x + BALL_RADIUS*2, self.ball.y)\n obj4 = self.window.get_object_at(self.ball.x + BALL_RADIUS*2, self.ball.y + BALL_RADIUS*2)\n if obj1 is not None and obj1.width == BRICK_WIDTH:\n self.window.remove(obj1)\n self.num_bricks -= 1\n self.__dy = -self.__dy\n elif obj2 is not None and obj2.width == BRICK_WIDTH:\n self.window.remove(obj2)\n self.num_bricks -= 1\n self.__dy = -self.__dy\n elif obj2 is not None and obj2.width == PADDLE_WIDTH:\n self.__dy = -self.__dy\n elif obj3 is not None and obj3.width == BRICK_WIDTH:\n self.window.remove(obj3)\n self.num_bricks -= 1\n self.__dy = -self.__dy\n elif obj4 is not None and obj4.width == BRICK_WIDTH:\n self.window.remove(obj4)\n self.num_bricks -= 
1\n self.__dy = -self.__dy\n elif obj4 is not None and obj4.width == PADDLE_WIDTH:\n self.__dy = -self.__dy", "def bounce_collision(self, otherball):\r\n # relative positions\r\n dx = self.unif[0] - otherball.unif[0]\r\n dy = self.unif[1] - otherball.unif[1]\r\n rd = self.radius + otherball.radius\r\n # check sign of a.b to see if converging\r\n dotP = dot([dx, dy, 0.0],\r\n [self.vx - otherball.vx, self.vy - otherball.vy, 0.0])\r\n if dx * dx + dy * dy <= rd * rd and dotP < 0:\r\n R = otherball.mass / self.mass #ratio of masses\r\n \"\"\"Glancing angle for equating angular momentum before and after collision.\r\n Three more simultaneous equations for x and y components of momentum and\r\n kinetic energy give:\r\n \"\"\"\r\n if dy:\r\n D = dx / dy\r\n delta2y = 2 * (D * self.vx + self.vy -\r\n D * otherball.vx - otherball.vy) / (\r\n (1 + D * D) * (R + 1))\r\n delta2x = D * delta2y\r\n delta1y = -1 * R * delta2y\r\n delta1x = -1 * R * D * delta2y\r\n elif dx:\r\n # Same code as above with x and y reversed.\r\n D = dy / dx\r\n delta2x = 2 * (D * self.vy + self.vx -\r\n D * otherball.vy - otherball.vx) / (\r\n (1 + D * D) * (R + 1))\r\n delta2y = D * delta2x\r\n delta1x = -1 * R * delta2x\r\n delta1y = -1 * R * D * delta2x\r\n else:\r\n delta1x = delta1y = delta2x = delta2y = 0\r\n\r\n self.vx += delta1x\r\n self.vy += delta1y\r\n otherball.vx += delta2x\r\n otherball.vy += delta2y", "def on_collide(self, collider, point, amount):\n pass", "def collision_3():\r\n tu.reset()\r\n print(\"collision_3\")\r\n r = 100\r\n b1 = Ball2D(r=r, x=0, y=tbl.y_min, vy=tbl.v_max, color=\"blue\")\r\n b2 = Ball2D(r=r, x=0, y=tbl.y_max, vy=-tbl.v_max, color=\"red\")\r\n bc = BallCollision2D(balls=[b1, b2])\r\n max_r_sq = tbl.x_max**2 + tbl.y_max**2\r\n while (b1.x**2 + b1.y**2 < max_r_sq\r\n or b2.x**2 + b2.y**2 < max_r_sq):\r\n bc.ball_display()\r\n bc.ball_collision_update()\r\n time.sleep(t_update)\r\n bc.ball_update()\r\n if clear_at_end:\r\n bc.reset()", "def collide(b1,b2):\n if mag(b1.pos-b2.pos) < (b1.radius + b2.radius - .05):\n return True", "def bounce_horizontal(self):\n self.velocity.dx = -self.velocity.dx\n return", "def check_collision(self, sprite1, sprite2):\r\n col = pygame.sprite.collide_rect(sprite1, sprite2)\r\n if col == True:\r\n if step != 40 and step != 80:\r\n self.speed[0] = - self.speed[0]\r\n self.speed[1] = - self.speed[1]\r\n self.image = pygame.transform.flip(self.image, 1, 0)", "def stop_or_rebound(self):\n if pygame.time.get_ticks() - self.last_bounce_time < STOPPING_LIM and self.last_bounce_time != 0:\n self.y = REST_Y - self.radius\n self.vel_x = 0\n self.vel_y = 0\n self.vel = 0\n else:\n self.vel_y = -self.vel_y * REBOUND_SCALE\n self.vel_x *= REBOUND_SCALE", "def update(self, world):\n self.rect.move_ip(self.vel)\n \n if not world.rect.contains(self.rect):\n self.kill()\n return\n self.active_timer -= 1\n if self.active_timer <= 0:\n self.collision_check()", "def update(self):\n self.rect.y += self.speedy\n ## kill the sprite after it moves over the top border\n if self.rect.top > HEIGHT:\n self.kill()", "def detectWallCollision(self):\n if self.right >= GAME_WIDTH or self.left <= 0:\n self._vx = -1.0 * self._vx\n if self.top >= GAME_HEIGHT:\n self._vy = -1.0 * self._vy", "def is_collision(self):\n to_del_tie_fighters = []\n to_del_bullets = []\n for i in range(len(self.tie_fighters)):\n for j in range(len(self.bullets)):\n distance = math.sqrt(math.pow(self.tie_fighters[i].position_x -\n self.bullets[j].position_x, 2) +\n math.pow(self.tie_fighters[i].position_y -\n 
self.bullets[j].position_y, 2))\n if distance < 25:\n to_del_tie_fighters.append(i)\n to_del_bullets.append(j)\n self.score += 1\n for i in reversed(to_del_tie_fighters):\n del self.tie_fighters[i]\n self.number_of_tie_fighters -= 1\n for j in reversed(to_del_bullets):\n del self.bullets[j]", "def handleCollide(self, args, geom1, geom2):\n log.debug('handleCollide')\n # calculate intersection points\n cl = ode.collide(geom1, geom2)\n contacts = [c for c in cl if not (isinstance(c.getContactGeomParams()[3], ode.GeomCCylinder) and isinstance(c.getContactGeomParams()[4], ode.GeomCCylinder))]\n for c in contacts:\n assert c.getContactGeomParams()[1] == (0.0, 0.0, 1.0)\n log.debug('collision between %s and %s', str(geom1), str(geom2))\n self.addContact(geom1, geom2, c)\n log.debug('/handleCollide')", "def handle_collide(self):\n\t\tself.x = random.randrange(games.screen.width)\n\t\tself.y = random.randrange(games.screen.height)", "def check_bounds(self):\n\n if self.bounds_action == self.BOUNCE:\n if self.hits_left_or_right():\n self.dx = self.dx * -1\n if self.hits_top_or_bottom():\n self.dy = self.dy * -1\n\n if self.bounds_action == self.STOP:\n if self.hits_left_or_right():\n self.dx = 0\n self.dy = 0\n if self.hits_top_or_bottom():\n self.dx = 0\n self.dy = 0\n\n if self.bounds_action == self.SKID:\n if self.hits_left_or_right():\n self.dx = 0\n if self.hits_top_or_bottom():\n self.dy = 0\n\n if self.bounds_action == self.DIE:\n if self.hits_left_or_right() or self.hits_top_or_bottom():\n self.dx = 0\n self.dy = 0\n self.visible = False", "def corral_collide(ball):\r\n\r\n # If the ball hits wallA\r\n if ball.pos.z < wallA.pos.z: # Hit -- check for z\r\n ball.pos.z = wallA.pos.z # Bring back into bounds\r\n ball.vel.z *= -1.0 # Reverse the z velocity\r\n\r\n # If the ball hits wallB\r\n if ball.pos.x < wallB.pos.x: # Hit -- check for x\r\n ball.pos.x = wallB.pos.x # Bring back into bounds\r\n ball.vel.x *= -1.0 # Reverse the x velocity\r\n \r\n # If the ball hits wallC\r\n if ball.pos.z > wallC.pos.z: # Hit -- check for x\r\n ball.pos.z = wallC.pos.z # Bring back into bounds\r\n ball.vel.z *= -1.0 # Reverse the x velocity\r\n \r\n # If the ball hits wallD\r\n if ball.pos.x > wallD.pos.x: #Hit -- check for z\r\n ball.pos.x = wallD.pos.x # Bring back into bounds\r\n ball.vel.x *= -1.0 #Reverse the z velocity", "def check_collide(self):\n\n\t\tfor pizza in self.overlapping_sprites:\n\t\t\tpizza.handle_collide()", "def check_collide(self):\n for pizza in self.overlapping_sprites:\n pizza.handle_collide()", "def update(self):\n self.rect.y += self.speedy\n # kill the sprite after it moves over the top border\n if self.rect.top > HEIGHT:\n self.kill()", "def vertbounce(self):\r\n self.dy=-self.dy", "def drop(self):\n if (pyxel.frame_count % self.vy) == 0:\n mapDel(self, theFallen)\n self.y = (self.y + 1)\n mapAdd(self, theFallen)", "def check_collide(self):\r\n for raindrop in self.overlapping_sprites:\r\n raindrop.handle_collide()", "def on_collide_exit(self, collider, point):\n pass", "def collision(self, a, b):\n kata = a.y - b.y\n katb = a.x - b.x\n hippo = math.sqrt(kata*kata + katb*katb)\n angle = math.asin(kata/hippo)\n\n ux = a.dx*math.cos(-angle) - a.dy*math.sin(-angle) #Rotation\n uy = a.dx*math.sin(-angle) + a.dy*math.cos(-angle)\n\n bux = b.dx*math.cos(-angle) - b.dy*math.sin(-angle)\n buy = b.dx*math.sin(-angle) + b.dy*math.cos(-angle)\n\n aux = (ux*(a.mag-b.mag)+2*b.mag*bux)/(a.mag+b.mag) #Calculate Velocity\n abux = (bux*(b.mag-a.mag)+2*a.mag*ux)/(b.mag+a.mag)\n\n b.dx = 
abux*math.cos(angle) - buy*math.sin(angle) #Rotation back\n b.dy = abux*math.sin(angle) + buy*math.cos(angle)\n\n a.dx = aux*math.cos(angle) - uy*math.sin(angle)\n a.dy = aux*math.sin(angle) + uy*math.cos(angle)", "def has_collide(self, obj):\n rect1 = self.anim.getRect()\n rect2 = obj.anim.getRect()\n \n rect1.move_ip(self.pos)\n rect2.move_ip(obj.pos)\n \n return rect1.colliderect(rect2)", "def update(self):\n if not self.dead:\n self.image = self.rot_center()\n self.mask = pygame.mask.from_surface(self.image)\n\n if self.health <= 0:\n self.dead = True\n self.image = self.ghost.update()\n self.mask = pygame.mask.from_surface(self.image)", "def update(self):\n self.rect.y += self.speedy\n ## kill the sprite after it moves over the top border\n if self.rect.bottom < 0:\n self.kill()\n\n ## now we need a way to shoot\n ## lets bind it to \"spacebar\".\n ## adding an event for it in Game loop", "def collision(self):\n raise NotImplementedError", "def is_wall_collided(self)-> bool:\n # print('{} >= {} or {} <= 0'.format(self.x + self.width, self.windows_size))\n if self.x <= 0:\n self.velocity = -self.velocity\n return True\n return False", "def __remove_collision(self,x_pos,y_pos):\r\n random_board = random.choice(self.board_list).board\r\n collision_measurement = random_board[y_pos][x_pos]\r\n \r\n self.board_list = [board for board in self.board_list if board.board[y_pos][x_pos] == collision_measurement]", "def handle_collide(self):\r\n self.x = random.randrange(games.screen.width)\r\n self.y = random.randrange(games.screen.height)", "def update(self):\n self.rect.y += self.speedy\n # If the bullet doesn't hit a pawn, it is removed from the screen\n if self.rect.bottom > 600:\n self.kill()", "def checkEdges( self ):\n\t\tx, y = self.position.xy\n\t\tvx, vy = self.velocity.xy\n\t\t\n\t\t#if particle hit left or right wall\n\t\tif abs( x ) > WINDOW_X - self.r:\n\t\t\t#change vertical speed\n\t\t\tvx *= -1\n\t\t\t\n\t\t#if particle hit top or bottom wall\n\t\tif abs( y ) > WINDOW_Y - self.r:\n\t\t\t#change horizontal speed\n\t\t\tvy *= -1\n\t\t\n\t\t#enter new velocity\n\t\tself.velocity.xy = (vx, vy)", "def update(self):\n\n a = [0.0, 0.0]\n a[0] += cos(radians(self.rot+90)) * self.speed\n a[1] += sin(radians(self.rot+90)) * self.speed\n\n self.x -= a[0]\n self.y -= a[1]\n\n if self.x < 0 or self.y < 0:\n self.owner.bullet_list.remove(self)\n del(self)\n\n elif self.x > 500 or self.y > 500:\n self.owner.bullet_list.remove(self)\n del(self)", "def update(self):\n self.rect.y += self.speedy\n # kill the sprite after it moves over the top border\n if self.rect.bottom < 0:\n self.kill()\n\n # now we need a way to shoot\n # lets bind it to \"spacebar\".\n # adding an event for it in Game loop", "def on_collision(self, other_sprite, game):\n pass", "def collision( ball1, ball2 ):\n\t\t\n\t#equation from wikipedia\n\ta1 = 2 * float(ball2.mass / (ball1.mass + ball2.mass))\t\t\t\t\t\t\t\t# 2 * m2 / ( m1 + m2 ) \n\ta2 = 2 - a1\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# 2 * m1 / ( m1 + m2 ) = 2 - m2 / ( m1 + m2 ) \n\tb = (ball1.velocity - ball2.velocity) * (ball1.position - ball2.position)\t\t\t# < v1 - v2, x1 - x2 > = < v2 - v1, x2 - x1 >\n\tc = (ball1.position - ball2.position).norm() \t\t\t\t\t\t\t\t\t\t# || x1 - x2 || ^ 2\t= || x2 - x1 || ^ 2\t\n\tif c == 0:\n\t\tc = 0.01\t\t\t\t\t\t\n\td = b / c\n\n\t#enter new velocites\n\tball1.velocity = ball1.velocity - (ball1.position - ball2.position) * a1 * d\n\tball2.velocity = ball2.velocity - (ball2.position - ball1.position) * a2 * d\n\n\t#changing color 
\n\tball1.color = ball2.color = ( \t(ball1.color[0] + ball2.color[0]) * 0.5, \n\t\t\t\t\t\t\t\t\t(ball1.color[1] + ball2.color[1]) * 0.5, \n\t\t\t\t\t\t\t\t\t(ball1.color[2] + ball2.color[2]) * 0.5\t\t)", "def e_collisions(rect, enemy_list):\r\n for enemy in enemy_list:\r\n if rect.colliderect(enemy[1]) and rect.bottom - 4 < enemy[1].top:\r\n dropped_objects.append([[enemy[1].x, enemy[1].y], heart_img, [0, 0], 45])\r\n enemy_list.remove(enemy)", "def collide(self, other, extra=None):\n pass", "def brickCollision(self, ball):\n for brick in self._bricks:\n if brick.contains(ball.right,ball.bottom) and brick in self._bricks:\n self.breaking.play()\n self._bricks.remove(brick)\n #print 'bottom right'\n ball.verticalBounce()\n if brick.contains(ball.left,ball.bottom) and brick in self._bricks:\n self.breaking.play()\n self._bricks.remove(brick)\n #print 'bottom left'\n ball.verticalBounce()\n if brick.contains(ball.right,ball.top) and brick in self._bricks:\n self.breaking.play()\n self._bricks.remove(brick)\n #print 'top right'\n ball.verticalBounce()\n if brick.contains(ball.left,ball.top) and brick in self._bricks:\n self.breaking.play()\n self._bricks.remove(brick)\n #print 'top left'\n ball.verticalBounce()", "def check_falling(self, obstacles):\n self.rect.move_ip((0, 1))\n if not pygame.sprite.spritecollideany(self, obstacles):\n if not self.climb:\n\t self.fall = True\n\n self.rect.move_ip((0, -1))", "def collision_6():\r\n tu.reset()\r\n print(\"collision_6\")\r\n r = 100\r\n sep = r*.0\r\n maxby = tbl.y_min + 7*r\r\n b1 = Ball2D(r=r, x=0, y=tbl.y_min, vy=tbl.v_max, color=\"blue\")\r\n b2 = Ball2D(r=r, x=0, y=maxby, vy=0, color=\"red\")\r\n b3 = Ball2D(r=r, x=0, y=maxby-2*(r+sep), vy=0, color=\"orange\")\r\n b4 = Ball2D(r=r, x=0, y=maxby-4*(r+sep), vy=0, color=\"green\")\r\n bc = BallCollision2D(balls=[b1, b2, b3, b4])\r\n while (b1.x**2 + b1.y**2 < max_r_sq\r\n and b2.x**2 + b2.y**2 < max_r_sq\r\n and b3.x**2 + b3.y**2 < max_r_sq\r\n and b4.x**2 + b4.y**2 < max_r_sq):\r\n bc.ball_display()\r\n bc.ball_collision_update()\r\n time.sleep(t_update)\r\n bc.ball_update()\r\n if clear_at_end:\r\n bc.reset()", "def collision_5():\r\n tu.reset()\r\n print(\"collision_5\")\r\n r = 100\r\n maxby = tbl.y_max - 3*r\r\n b1 = Ball2D(r=r, x=0, y=tbl.y_min, vy=tbl.v_max, color=\"blue\")\r\n b2 = Ball2D(r=r, x=-r, y=maxby, vy=0, color=\"red\")\r\n b3 = Ball2D(r=r, x=+r, y=maxby, vy=0, color=\"orange\")\r\n b4 = Ball2D(r=r, x=0, y=maxby-r*sqrt(3), vy=0, color=\"green\")\r\n bc = BallCollision2D(balls=[b1, b2, b3, b4])\r\n max_r_sq = tbl.x_max**2 + tbl.y_max**2\r\n while (b2.x**2 + b2.y**2 < max_r_sq\r\n or b3.x**2 + b3.y**2 < max_r_sq):\r\n bc.ball_display()\r\n bc.ball_collision_update()\r\n #time.sleep(int(.01))\r\n time.sleep(t_update)\r\n bc.ball_update()\r\n if clear_at_end:\r\n bc.reset()", "def update(self):\n self.rect.y += self.speedy\n if self.rect.bottom < 0:\n self.kill()", "def update(self):\n self.rect.y += self.speedy\n if self.rect.bottom < 0:\n self.kill()", "def check_boundary(self):\n\n\t\tif self.Bubble_initial_pos[0] <= self.Bubble_radius or self.Bubble_initial_pos[0] >= self.tk_pic.width - self.Bubble_radius:\n\t\t\tself.Bubble_vel[0] = -self.Bubble_vel[0]", "def ball_collision_update(self):\r\n ball_pairs = self.balls_colliding()\r\n for ball_pair in ball_pairs:\r\n b1,b2 = ball_pair\r\n self.ball_pair_collision_update(b1,b2)", "def update(self):\r\n # Desplaza el bloque un píxel hacia abajo. 
s\r\n if self.rect.left < 50 or self.rect.right > 600:\r\n self.speed[0] = -self.speed[0]\r\n if self.rect.top < 0 or self.rect.bottom > 200:\r\n self.speed[1] = -self.speed[1]\r\n self.rect.move_ip((self.speed[0], self.speed[1])) \r\n if self.rect.y > 500:\r\n self.rect.x = random.randrange(10,600) \r\n self.rect.y = random.randrange(-400,-200)\r\n self.rect.y += 5", "def update(self):\n #print \"player velocity: \", self.player.x_velocity\n temp = pygame.event.get()\n for event in temp:\n if event.type == pygame.QUIT:\n sys.exit()\n if event.type == pygame.KEYDOWN: \n if event.key == pygame.K_ESCAPE:\n self.bckMusic.pause()\n self.pause()\n self.bckMusic.unpause()\n \n if self.player.getRect().top > self.vp.rect.bottom or \\\n self.player.getRect().bottom < self.vp.rect.top:\n self.player.getStateMachine().kill()\n self.bckMusic.stop()\n self.running = False\n self.screen.blit(self.deadImage, self.deadImage.get_rect())\n pygame.display.flip()\n pygame.time.wait(3000)\n \n else:\n self.player.update(temp)\n \n killList = []\n for enemy in self.enemies:\n if self.player.attacking is True and \\\n self.player.getRect().colliderect(enemy.getRect()):\n if enemy.HP > 1:\n enemy.HP -= 20\n coll = self.player.handleCollision(\"enemy\", enemy.getRect())\n self.player.getStateMachine().pushEnemy(enemy, coll)\n else:\n killList.append(enemy)\n self.killSound.play()\n self.score += 100 + self.player.HP + self.player.lives * 100\n \n elif self.player.getRect().colliderect(enemy.getRect()):\n if self.player.HP > 1:\n self.player.HP -= 1\n else:\n self.bckMusic.stop()\n self.running = False\n self.screen.blit(self.deadImage, self.deadImage.get_rect())\n pygame.display.flip()\n pygame.time.wait(3000)\n coll = self.player.handleCollision(\"enemy\", enemy.getRect())\n self.player.getStateMachine().pushEnemy(enemy, coll)\n for enemy in killList:\n self.enemies.remove(enemy)\n enemy = None\n \n for enemy in self.enemies:\n enemy.update()\n \n for solid in self.level.solids:\n if self.player.getRect().colliderect(solid):\n self.player.handleCollision(\"object\", solid)\n \n for platform in self.level.platform:\n if self.player.getRect().colliderect(platform):\n self.player.handleCollision(\"object\", platform)\n \n del killList[:]\n \n for enemy in self.enemies:\n if enemy.getRect().top > self.vp.rect.bottom or \\\n enemy.getRect().bottom < self.vp.rect.top:\n killList.append(enemy)\n \n \n for enemy in killList:\n self.enemies.remove(enemy)\n enemy = None\n \n \n if self.player.getRect().colliderect(self.coinRect):\n self.won = True\n self.coinSound.play()\n self.screen.blit(self.winText, (100,200))\n pygame.display.flip()\n pygame.time.wait(2000)\n self.screen.blit(self.winImage, self.winImage.get_rect())\n pygame.display.flip()\n pygame.time.wait(3000)\n self.running = False\n \n if self.player.getRect().left >= self.vp.rect.right - 300 and \\\n self.player.getRect().left + 300 <= 3800:\n self.vp.rect.right = self.player.getRect().left + 300\n if self.player.getRect().left <= self.vp.rect.left + 300 and \\\n self.player.getRect().left - 300 >= 0:\n self.vp.rect.left = self.player.getRect().left - 300\n if self.vp.rect.right > 3800:\n self.vp.rect.right = 3800\n if self.vp.rect.left < 0:\n self.vp.rect.left = 0\n if self.running == False:\n self.reset()", "def bounce_vertical(self):\n self.velocity.dy = -self.velocity.dy\n return", "def unstuck(self):\n mask = Map.current_map.mask\n \n x_max, y_max = mask.get_size()\n orig_x, orig_y = round(self.x), round(self.y)\n x, y = orig_x , orig_y\n unstuck_aggr = 
COLLISION_UNSTUCK_AGGRESSION\n \n # Vertical check for any open spots we could put the entity on...\n while y > 0:\n if not mask.get_at((x, y)):\n self.y = y\n self.vy = -unstuck_aggr\n return\n y -= unstuck_aggr\n y = orig_y\n while y < y_max:\n if not mask.get_at((x, y)):\n self.y = y\n self.vy = unstuck_aggr\n return\n y += unstuck_aggr\n y = orig_y\n \n # Horizontal spots?\n while x > 0:\n if not mask.get_at((x, y)):\n self.x = x\n self.vx = -unstuck_aggr\n return\n x -= unstuck_aggr\n x = orig_x\n while x < x_max:\n if not mask.get_at((x, y)):\n self.x = x\n self.vx = unstuck_aggr\n return\n x += unstuck_aggr\n x = orig_x\n \n # Diagonal spots\n while x > 0 and y > 0:\n if not mask.get_at((x, y)):\n self.x, self.y = x, y\n self.vx, self.vy = -unstuck_aggr, -unstuck_aggr\n return\n x, y = x - unstuck_aggr, y - unstuck_aggr\n x, y = orig_x, orig_y\n while x < x_max and y < y_max:\n if not mask.get_at((x, y)):\n self.x, self.y = x, y\n self.vx, self.vy = unstuck_aggr, unstuck_aggr\n return\n x, y = x + unstuck_aggr, y + unstuck_aggr\n x, y = orig_x, orig_y\n while x > 0 and y < y_max:\n if not mask.get_at((x, y)):\n self.x, self.y = x, y\n return\n x, y = x - unstuck_aggr, y + unstuck_aggr\n x, y = orig_x, orig_y\n while x < x_max and y > 0:\n if not mask.get_at((x, y)):\n self.x, self.y = x, y\n return\n x, y = x + unstuck_aggr, y - unstuck_aggr\n x, y = orig_x, orig_y\n \n # All right, I officially give up now.\n print(\"Couldn't unstuck object!\")", "def update(self, dt=1 / 60):\n max_speed = 10\n check_for_collision_with_list = arcade.check_for_collision_with_list\n player = self.player_sprite\n\n # Add gravity and move\n player.change_y -= self.gravity_constant\n speed = sqrt(player.change_x ** 2 + player.change_y ** 2)\n if speed > max_speed:\n ratio = max_speed / speed\n player.change_x *= ratio\n player.change_y *= ratio\n\n player.center_y += player.change_y\n player.center_x += player.change_x\n\n # Check for wall hit\n hit_list = [\n *check_for_collision_with_list(player, self.platforms),\n *check_for_collision_with_list(player, self.moving_platforms),\n ]\n recover = 0.666\n min_shadow_x = 12\n min_shadow_y = 6\n\n for hit in hit_list:\n shadow_x = min(player.right, hit.right) - \\\n max(player.left, hit.left)\n shadow_y = min(player.top, hit.top) - \\\n max(player.bottom, hit.bottom)\n role = getattr(hit, 'role', Role.OBJECT)\n collision_role = Role.OBJECT\n\n shift_y = 0\n if role == Role.RAMP_DOWN:\n shift_y = player.center_x - hit.left\n elif role == Role.RAMP_UP:\n shift_y = max(player.center_x - hit.left, 0) - 64\n\n # Falling down...\n if (player.change_y < 0\n and player.bottom < hit.top + shift_y < player.center_y\n and shadow_x > min_shadow_x\n and shadow_y < 24 + abs(shift_y)):\n player.bottom += max(recover *\n (hit.top + shift_y - player.bottom), 0.5)\n player.change_y = 0\n\n # Going up...\n elif (player.change_y > 0\n and role == collision_role\n and player.top > hit.bottom > player.center_y\n and shadow_x > min_shadow_x\n and shadow_y < 24):\n player.top -= max(recover * (player.top - hit.bottom), 0.5)\n player.change_y = 0\n\n # Going right...\n if (player.change_x > 0\n and (role == collision_role or role == Role.RAMP_UP)\n and player.right > hit.left\n and player.center_x < hit.center_x\n and shadow_y > min_shadow_y\n and shadow_x < 24):\n if role == Role.RAMP_UP:\n player.change_x /= 2\n player.change_y += 4 * player.change_y\n player.center_y += 64 + shift_y\n else:\n player.right -= max(recover *\n (player.right - hit.left), 0.5)\n player.change_x = 
0\n\n # Going left...\n elif (player.change_x < 0\n and role == collision_role\n and player.left < hit.right\n and player.center_x > hit.center_x\n and shadow_y > min_shadow_y\n and shadow_x < 24):\n player.left += max(recover * (hit.right - player.left), 0.5)\n player.change_x = 0", "def ifCollide( ball1, ball2 ):\n\t\n\tb1_x, b1_y = ball1.position.xy\n\tb2_x, b2_y = ball2.position.xy\n\t\n\t#vector connect center of particles\n\tdistant = Vector.from_points((b2_x, b2_y), (b1_x, b1_y))\n\t\n\t#if lenght of vector above is less( equal ) than sum of radius ( they overlapping )\n\tif ( ball1.r + ball2.r ) ** 2 >= distant.norm():\n\t\treturn True\n\telse:\n\t\treturn False", "def on_update(self, delta_time):\n # Reset collision data \n total = 0\n for ball in self.ball_list:\n ball.hit = False\n if ball.status != Status.VULNERABLE:\n total += 1\n self.totals.append(total)\n n = len(self.totals)\n self.red_points.append((5+WINDOW_WIDTH + n*0.4, (total*6)))\n self.blue_points.append((5+WINDOW_WIDTH + n*0.4, ((100-total)*6)))\n\n self.red_line_strip = arcade.create_line_strip(self.red_points, arcade.color.RED, 2)\n self.blue_line_strip = arcade.create_line_strip(self.blue_points, arcade.color.BLUE, 2)\n\n for ball in self.ball_list:\n ball.x += ball.change_x\n ball.y += ball.change_y\n if ball.status == Status.INFECTIOUS:\n ball.infectious -= 1\n if ball.infectious == 0:\n ball.immune()\n\n # Collision with walls\n if ball.x < ball.size:\n ball.change_x *= -1\n\n if ball.y < ball.size:\n ball.change_y *= -1\n\n if ball.x > WINDOW_WIDTH - ball.size:\n ball.change_x *= -1\n\n if ball.y > WINDOW_HEIGHT - ball.size:\n ball.change_y *= -1\n\n # Collision with another ball\n for other in self.ball_list:\n if other.id == ball.id:\n continue\n if other.hit:\n continue\n if (-ball.size < (ball.x - other.x) < ball.size) and (-ball.size < (ball.y - other.y) < ball.size):\n (ball.change_x,other.change_x) = (other.change_x,ball.change_x)\n (ball.change_y,other.change_y) = (other.change_y,ball.change_y)\n other.hit = True\n if ball.status == Status.INFECTIOUS:\n other.infect()\n if other.status == Status.INFECTIOUS:\n ball.infect()\n ball.hit = True\n break", "def handle_collisions():\n for sprite in sprite_group:\n for other in pygame.sprite.spritecollide(sprite, sprite_group, False):\n if sprite is not other and DO_KILL:\n sprite.kill()\n other.kill()", "def collide(self, xvel, yvel):\n level = self.current_level\n platforms = level.get_impassables() #TODO: remember that it might be possible to pass through some platforms in some directions.\n slopes = []\n default_platforms = []\n for p in platforms:\n if pygame.sprite.collide_mask(self, p) and p.is_solid:\n if p.is_sloped:\n slopes.append(p)\n else:\n default_platforms.append(p)\n for s in slopes:\n Being.collideWith(self, xvel, yvel, s)\n for p in default_platforms:\n Being.collideWith(self, xvel, yvel, p)\n self.collideExits()", "def bounce(self):\n self.y_dir *= -1 # Reverse vertical direction of travel", "def update(self):\r\n if self.count:\r\n if self.count>=games.screen.fps*1.5:\r\n self.dx=self.dx1\r\n self.dy=self.dy1\r\n self.count=None\r\n if self.left<0:\r\n self.sidebounce()\r\n if self.right>games.screen.width:\r\n self.sidebounce()\r\n if self.top<0:\r\n self.vertbounce()\r\n if self.bottom>games.screen.height:\r\n self.die()\r\n if self.count:\r\n self.count+=1", "def collide(self, score, shooter):\n self.lives -= shooter.damage\n if self.lives <= 0:\n score.update_score()\n self.alive = False", "def interaction_hole(self) -> None:\n 
x_dead_char = self.moving_character.x_obj\n y_dead_char = self.moving_character.y_obj\n void = ob.Void(x_dead_char, y_dead_char)\n # Replacing character by a Void\n self.grid.obj_list[self.moving_character] = void\n del self.grid.character_list[self.index_character]\n self.grid.character_just_died = True", "def update(self, game):\n self.rect = pygame.Rect(self.x - self.r, self.y - self.r, 2 * self.r, 2 * self.r)\n self.x += self.vx * game.delta\n self.y += self.vy * game.delta\n\n \"\"\"Do not let Player get out of the Game window\"\"\"\n if self.x < self.r:\n if self.vx < 0:\n self.vx = -self.vx\n self.x = self.r\n if self.y < self.r:\n if self.vy < 0:\n self.vy = -self.vy\n self.y = self.r\n if self.x > game.width - self.r:\n if self.vx > 0:\n self.vx = -self.vx\n self.x = game.width - self.r\n if self.y > game.height - self.r:\n if self.vy > 0:\n self.vy = -self.vy\n self.y = game.height - self.r\n\n \"\"\"Bounce conditions for ball\"\"\"\n if pygame.sprite.collide_rect(self, game.main_platform):\n self.y -= 7\n self.vy = -self.vy\n self.vx += game.main_platform.vx\n\n \"\"\"Displacement of ball from striking with platform\"\"\"\n for z in game.platforms:\n if pygame.sprite.collide_rect(self, game.platforms[z]):\n self.y += 7\n self.vy = -self.vy\n game.to_remove.add(z)\n\n \"\"\"Losing after striking red platform\"\"\"\n for z in game.platformsx:\n if pygame.sprite.collide_rect(self, game.platformsx[z]):\n game.draw_lose_screen()" ]
[ "0.7171883", "0.69743687", "0.6876112", "0.68310064", "0.6821696", "0.6743633", "0.6663485", "0.6639343", "0.6614661", "0.66114783", "0.6596486", "0.65865046", "0.65635604", "0.65623677", "0.6554627", "0.65210336", "0.6513835", "0.6488594", "0.6458591", "0.6441815", "0.64399636", "0.6393043", "0.63393503", "0.63372576", "0.6318756", "0.6315025", "0.63118565", "0.63104016", "0.62954015", "0.6293111", "0.6278835", "0.6268152", "0.6268152", "0.6268152", "0.6266687", "0.62514037", "0.62181747", "0.62052214", "0.61999065", "0.61984247", "0.6186615", "0.6182906", "0.6177707", "0.6174208", "0.61567724", "0.61262083", "0.61207527", "0.61111903", "0.6110117", "0.61048776", "0.6103586", "0.6092823", "0.6080893", "0.6068104", "0.6055397", "0.6038644", "0.60259163", "0.60241085", "0.60236514", "0.6023599", "0.60232353", "0.60180146", "0.601486", "0.60111296", "0.60108286", "0.6000503", "0.59918916", "0.599127", "0.598496", "0.59762686", "0.5963513", "0.59592104", "0.59585273", "0.59502", "0.59476954", "0.59442484", "0.59428155", "0.5941647", "0.5937806", "0.5926686", "0.5922392", "0.59097666", "0.59021753", "0.59021753", "0.58962756", "0.58873683", "0.5881734", "0.5881302", "0.58804286", "0.58769816", "0.5872712", "0.58605844", "0.5859502", "0.5847137", "0.584578", "0.58396274", "0.5836103", "0.5810093", "0.5803999", "0.5801189" ]
0.61831194
41
Set up strategies to be used by the validator. These strategies can be provided
def _using(*args, validator: "DictValidator") -> "DictValidator":
    def setup_strategy(validator, strategy) -> "DictValidator":
        if isinstance(strategy, SortingStrategy):
            validator.sorting = strategy
        elif isinstance(strategy, FilteringStrategy):
            validator.filtering = strategy
        elif isinstance(strategy, PrintingStrategy):
            validator.printing = strategy
        else:
            raise CertumException("The strategy provided for the validator is unknown.")
        return validator

    for arg in args:
        if isinstance(arg, list):
            for strategy in arg:
                validator = setup_strategy(validator, strategy)
        elif isinstance(arg, Strategy):
            validator = setup_strategy(validator, arg)
        else:
            raise CertumException("The strategy provided for the validator is unknown.")
    return validator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_default_strategies(self, fleetmanager_strategy, transport_strategy, customer_strategy, directory_strategy,\n station_strategy):\n self.fleetmanager_strategy = load_class(fleetmanager_strategy)\n self.transport_strategy = load_class(transport_strategy)\n self.customer_strategy = load_class(customer_strategy)\n self.directory_strategy = load_class(directory_strategy)\n self.station_strategy = load_class(station_strategy)\n logger.debug(\"Loaded default strategy classes: {}, {}, {}, {} and {}\".format(self.fleetmanager_strategy,\n self.transport_strategy,\n self.customer_strategy,\n self.directory_strategy,\n self.station_strategy))", "def initialize_location_strategies(self):\n locator_manager.register_locators(\"sf\", lex_locators)\n locator_manager.register_locators(\"text\", \"Salesforce.Locate Element by Text\")\n locator_manager.register_locators(\"title\", \"Salesforce.Locate Element by Title\")\n\n # This does the work of actually adding all of the above-registered\n # location strategies, plus any that were registered by keyword\n # libraries.\n locator_manager.add_location_strategies()", "def set_strategies(players, strategies):\n if players.num_players != len(strategies):\n raise ValueError(\"len(strategies) must equal num_players\")\n for player, strategy in zip(players.tuple_, strategies):\n player.play = MethodType(strategy, player, Player)", "def test_unexpected_strategy():\n assert strategies == {\n 'css': FindByCss,\n 'xpath': FindByXPath,\n 'tag': FindByTag,\n 'name': FindByName,\n 'text': FindByText,\n 'id': FindById,\n 'value': FindByValue,\n }", "def __init__(self):\n self.strategy = Strategy(self)", "def strategy(func):\n strategies.append(func)\n return func", "def init_default_strategies() -> None:\n register_string_format(\"binary\", st.binary())\n register_string_format(\"byte\", st.binary().map(lambda x: b64encode(x).decode()))\n\n def make_basic_auth_str(item: Tuple[str, str]) -> str:\n return _basic_auth_str(*item)\n\n latin1_text = st.text(alphabet=st.characters(min_codepoint=0, max_codepoint=255))\n\n register_string_format(\"_basic_auth\", st.tuples(latin1_text, latin1_text).map(make_basic_auth_str)) # type: ignore\n register_string_format(\"_bearer_auth\", st.text().map(\"Bearer {}\".format))", "def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy4))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # Add are you a lucker strategies\n strategies.extend(\n generate_meta_strategy_pair(AreYouALuckerStrategy))\n\n # Add Greenberg strategies\n strategies.extend(\n generate_meta_strategy_pair(GreenbergStrategy))\n\n # Add RPS Meta Fix 
strategies\n strategies.extend(\n generate_meta_strategy_pair(RPSMetaFixStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def strategy(self, strategy):\n\n if strategy == \"auto\":\n error = dll.wasmtime_config_strategy_set(self.__ptr__, 0)\n elif strategy == \"cranelift\":\n error = dll.wasmtime_config_strategy_set(self.__ptr__, 1)\n elif strategy == \"lightbeam\":\n error = dll.wasmtime_config_strategy_set(self.__ptr__, 2)\n else:\n raise WasmtimeError(\"unknown strategy: \" + str(strategy))\n if error:\n raise WasmtimeError.__from_ptr__(error)", "def setup(cls):\n cls.location = {\"longitude\": 0.1270, \"latitude\": 51.5194}\n cls.search_query = {\n \"search_key\": \"intro_service\",\n \"search_value\": \"intro_alice\",\n \"constraint_type\": \"==\",\n }\n cls.search_radius = 5.0\n cls.admin_host = \"127.0.0.1\"\n cls.admin_port = 8021\n cls.ledger_url = \"http://127.0.0.1:9000\"\n config_overrides = {\n \"models\": {\n \"strategy\": {\n \"args\": {\n \"location\": cls.location,\n \"search_query\": cls.search_query,\n \"search_radius\": cls.search_radius,\n \"admin_host\": cls.admin_host,\n \"admin_port\": cls.admin_port,\n \"ledger_url\": cls.ledger_url,\n }\n }\n },\n }\n\n super().setup(config_overrides=config_overrides)\n\n # behaviours\n cls.faber_behaviour = cast(\n FaberBehaviour,\n cls._skill.skill_context.behaviours.faber,\n )\n\n # dialogues\n cls.default_dialogues = cast(\n DefaultDialogues, cls._skill.skill_context.default_dialogues\n )\n cls.http_dialogues = cast(\n HttpDialogues, cls._skill.skill_context.http_dialogues\n )\n cls.oef_search_dialogues = cast(\n OefSearchDialogues, cls._skill.skill_context.oef_search_dialogues\n )\n\n # handlers\n cls.http_handler = cast(HttpHandler, cls._skill.skill_context.handlers.http)\n cls.oef_search_handler = cast(\n OefSearchHandler, cls._skill.skill_context.handlers.oef_search\n )\n\n # models\n cls.strategy = cast(Strategy, cls._skill.skill_context.strategy)\n\n cls.logger = cls._skill.skill_context.logger\n\n # mocked objects\n cls.mocked_method = \"SOME_METHOD\"\n cls.mocked_url = \"www.some-url.com\"\n cls.mocked_version = \"some_version\"\n cls.mocked_headers = \"some_headers\"\n cls.body_dict = {\"some_key\": \"some_value\"}\n cls.body_str = \"some_body\"\n cls.body_bytes = b\"some_body\"\n cls.mocked_body_bytes = json.dumps(cls.body_str).encode(\"utf-8\")\n cls.mocked_query = Query(\n [Constraint(\"some_attribute_name\", ConstraintType(\"==\", \"some_value\"))],\n DataModel(\n \"some_data_model_name\",\n [\n Attribute(\n \"some_attribute_name\",\n str,\n False,\n \"Some attribute descriptions.\",\n )\n ],\n ),\n )\n cls.mocked_proposal = Description(\n {\n \"contract_address\": \"some_contract_address\",\n \"token_id\": \"123456\",\n \"trade_nonce\": \"876438756348568\",\n \"from_supply\": \"543\",\n \"to_supply\": \"432\",\n \"value\": \"67\",\n }\n )\n\n # list of messages\n cls.list_of_http_messages = (\n DialogueMessage(\n HttpMessage.Performative.REQUEST,\n {\n \"method\": cls.mocked_method,\n \"url\": cls.mocked_url,\n \"headers\": cls.mocked_headers,\n \"version\": cls.mocked_version,\n \"body\": cls.mocked_body_bytes,\n },\n is_incoming=False,\n ),\n )\n\n cls.list_of_oef_search_messages = (\n DialogueMessage(\n OefSearchMessage.Performative.SEARCH_SERVICES,\n {\"query\": cls.mocked_query},\n ),\n 
)", "def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy4))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # Add are you a lucker strategies\n strategies.extend(\n generate_meta_strategy_pair(AreYouALuckerStrategy))\n\n # Add RPS Meta Fix strategies\n strategies.extend(\n generate_meta_strategy_pair(RPSMetaFixStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=False,\n ))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n do_rotations = [True for _ in strategies]\n return strategies, do_rotations", "def setUp(self):\n test_helpers.patch_environ(self)\n\n data = []\n\n strategy1 = data_types.FuzzStrategyProbability()\n strategy1.strategy_name = 'fork,corpus_subset,'\n strategy1.probability = 0.33\n strategy1.engine = 'libFuzzer'\n data.append(strategy1)\n\n strategy2 = data_types.FuzzStrategyProbability()\n strategy2.strategy_name = 'random_max_len,value_profile,'\n strategy2.probability = 0.34\n strategy2.engine = 'libFuzzer'\n data.append(strategy2)\n ndb.put_multi(data)\n\n distribution = fuzz_task.get_strategy_distribution_from_ndb()\n\n environment.set_value('USE_BANDIT_STRATEGY_SELECTION', True)\n environment.set_value('STRATEGY_SELECTION_DISTRIBUTION', distribution)", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=False,\n ))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n 
strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n do_rotations = [True for _ in strategies]\n return strategies, do_rotations", "def _get_strategies(self) -> Dict[str, str]:\n strategies = [method for method in dir(self) if STRATEGY_IDENTIFIER in method]\n\n if not strategies:\n logger.warning(\n \"There are no strategy provided. \"\n \"Make sure the implemented strategy methods \"\n \"start contain the '%s' term.\" % STRATEGY_IDENTIFIER\n )\n return {str(n_method): method for n_method, method in enumerate(strategies)}", "def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy4))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # Add are you a lucker strategies\n strategies.extend(\n generate_meta_strategy_pair(AreYouALuckerStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def InitStrategy(self, sname, strategy):\n\n self._string = sname\n\n self.strategy = strategy\n self.postracker = position.PositionTracker(self.strategy)", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=False,\n ))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n 
do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=False,\n ))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def setup(cls):\n super().setup()\n cls.search_behaviour = cast(\n GenericSearchBehaviour, cls._skill.skill_context.behaviours.search\n )\n cls.tx_behaviour = cast(\n GenericTransactionBehaviour, cls._skill.skill_context.behaviours.transaction\n )\n cls.strategy = cast(GenericStrategy, cls._skill.skill_context.strategy)\n\n cls.logger = cls._skill.skill_context.logger", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n strategies.extend(\n generate_meta_strategy_pair(\n WrappedRFindStrategy,\n limits=limits,\n sources=sources,\n shenanigans=False))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add memory pattern strategies\n strategies.extend(\n generate_meta_strategy_pair(MemoryPatternsV7Strategy))\n\n do_rotations = [True for _ in strategies]\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine 
Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy4))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=False,\n ))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(\n GeometryV4Strategy))\n\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n 
strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=False,\n ))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def test_strategies(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.options.auto_fence = True\n self.supervisor.supvisors.options.conciliation_strategy = 1\n self.supervisor.supvisors.options.starting_strategy = 2\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n self.assertDictEqual({'auto-fencing': True, 'starting': 'MOST_LOADED',\n 'conciliation': 'INFANTICIDE'}, rpc.get_strategies())", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n\n strategies.extend([\n generate_meta_strategy_pair(\n WrappedRFindStrategy,\n limits=limits,\n sources=sources,\n shenanigans=False)[0]])\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n 
generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def setUp(self):\n test_helpers.patch_environ(self)\n\n data = []\n\n strategy1 = data_types.FuzzStrategyProbability()\n strategy1.strategy_name = 'corpus_subset,'\n strategy1.probability = 0.33\n strategy1.engine = 'afl'\n data.append(strategy1)\n\n strategy2 = data_types.FuzzStrategyProbability()\n strategy2.strategy_name = 'corpus_mutations_radamsa,corpus_subset,'\n strategy2.probability = 0.34\n strategy2.engine = 'afl'\n data.append(strategy2)\n\n strategy3 = data_types.FuzzStrategyProbability()\n strategy3.strategy_name = 'corpus_subset,'\n strategy3.probability = 0.33\n strategy3.engine = 'afl'\n data.append(strategy3)\n ndb.put_multi(data)\n\n distribution = fuzz_task.get_strategy_distribution_from_ndb()\n\n environment.set_value('USE_BANDIT_STRATEGY_SELECTION', True)\n environment.set_value('STRATEGY_SELECTION_DISTRIBUTION', distribution)", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n\n strategies.extend(\n generate_meta_strategy_pair(\n WrappedRFindStrategy,\n limits=limits,\n sources=sources,\n shenanigans=False))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def __init__(self, algorithms={}, strategies={}, pickleFile=None):\n if pickleFile is None:\n self.algds = algorithms\n self.stratds = strategies\n self._bestalg = None\n self._unifpf = None\n else:\n if pickleFile.find('.gz') < 0:\n pickleFile += '.gz'\n with gzip.open(pickleFile) as f:\n entry = pickle.load(f)\n self.algds = entry.algds\n self.stratds = entry.stratds\n self._bestalg = entry._bestalg\n self._unifpf = entry._unifpf", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n\n strategies.extend(\n generate_meta_strategy_pair(\n WrappedRFindStrategy,\n limits=limits,\n sources=sources,\n shenanigans=False))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n 
do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n strategies.extend(\n generate_meta_strategy_pair(\n WrappedRFindStrategy,\n limits=limits,\n sources=sources,\n shenanigans=False))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n strategies.extend(\n generate_meta_strategy_pair(\n WrappedRFindStrategy,\n limits=limits,\n sources=sources,\n shenanigans=False))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(MemoryPatternsV7Strategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def training_pattern_setup(self, **overrides):\n raise NotImplementedError", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n strategies.extend(\n generate_meta_strategy_pair(\n WrappedRFindStrategy,\n limits=limits,\n sources=sources,\n shenanigans=False))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n strategies.extend(\n generate_meta_strategy_pair(\n WrappedRFindStrategy,\n limits=limits,\n sources=sources,\n shenanigans=False))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n 
generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add memory patterns v7\n strategies.extend(\n generate_meta_strategy_pair(MemoryPatternsV7Strategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def strategy(strategy_name: str):\r\n def wrapper(finder_class):\r\n global strategies\r\n strategies[strategy_name] = finder_class\r\n return finder_class\r\n return wrapper", "def create_strategy(config):\n build_strategy = paddle.static.BuildStrategy()\n exec_strategy = paddle.static.ExecutionStrategy()\n\n exec_strategy.num_threads = 1\n exec_strategy.num_iteration_per_drop_scope = (\n 10000\n if 'AMP' in config and config.AMP.get(\"use_pure_fp16\", False) else 10)\n\n fuse_op = True if 'AMP' in config else False\n\n fuse_bn_act_ops = config.get('fuse_bn_act_ops', fuse_op)\n fuse_elewise_add_act_ops = config.get('fuse_elewise_add_act_ops', fuse_op)\n fuse_bn_add_act_ops = config.get('fuse_bn_add_act_ops', fuse_op)\n enable_addto = config.get('enable_addto', fuse_op)\n\n build_strategy.fuse_bn_act_ops = fuse_bn_act_ops\n build_strategy.fuse_elewise_add_act_ops = fuse_elewise_add_act_ops\n build_strategy.fuse_bn_add_act_ops = fuse_bn_add_act_ops\n build_strategy.enable_addto = enable_addto\n\n return build_strategy, exec_strategy", "def test_multiple_optimizers(self):\n self.run_subtests(\n {\n \"sharding_strategy\": [\n ShardingStrategy.FULL_SHARD,\n ShardingStrategy.SHARD_GRAD_OP,\n ]\n },\n self._test_multiple_optimizers,\n )", "def decideStrategy(self,opponents_moves):\n if not opponents_moves: #No prior opponent moves to learn from so random.\n self.setStrategy(RPSRandomStrategy)\n else:\n strategies = [RPSOppositeStrategy,RPSLeastUsedStrategy,RPSMostUsedStrategy,RPSRandomStrategy]\n self.setStrategy(random.choice(strategies))", "def test_strategy_attribute_correct():\n assert 'css' == FindByCss.strategy\n assert 'id' == FindById.strategy\n assert 'name' == FindByName.strategy\n assert 'tag' == FindByTag.strategy\n assert 'text' == FindByText.strategy\n assert 'value' == FindByValue.strategy\n assert 'xpath' == FindByXPath.strategy", "def setCaseStrategy(self, strategy):\n return self._set(caseStrategy=strategy)", "def setCaseStrategy(self, strategy):\n return self._set(caseStrategy=strategy)", "def setUp(self):\n test_helpers.patch_environ(self)\n test_helpers.patch(self, [\n 'clusterfuzz._internal.bot.fuzzers.engine_common.decide_with_probability'\n ])\n self.mock.decide_with_probability.return_value = True\n\n data = []\n\n strategy1 = data_types.FuzzStrategyProbability()\n strategy1.strategy_name = 'random_max_len,value_profile,'\n strategy1.probability = 1\n strategy1.engine = 'libFuzzer'\n data.append(strategy1)\n ndb.put_multi(data)\n\n distribution = fuzz_task.get_strategy_distribution_from_ndb()\n\n environment.set_value('USE_BANDIT_STRATEGY_SELECTION', True)\n environment.set_value('STRATEGY_SELECTION_DISTRIBUTION', distribution)", "def strategy_config(self, strategy_config):\n self._strategy_config = strategy_config", "def _validate_estimator(self):\n\n if self.smote is not None:\n if isinstance(self.smote, SMOTE):\n self.smote_ = self.smote\n 
else:\n raise ValueError('smote needs to be a SMOTE object.'\n 'Got {} instead.'.format(type(self.smote)))\n else:\n self.smote_ = SMOTE(ratio=self.ratio, k_neighbors=3,\n random_state=self.random_state)\n\n if self.tomek is not None:\n if isinstance(self.tomek, TomekLinks):\n self.tomek_ = self.tomek\n else:\n raise ValueError('tomek needs to be a TomekLinks object.'\n 'Got {} instead.'.format(type(self.tomek)))\n else:\n self.tomek_ = TomekLinks(ratio=\"all\",\n random_state=self.random_state)", "def generate():\n strategies = []\n strategies.extend(\n generate_meta_strategy_pair(\n StatisticalPredictionStrategy))\n do_rotations = [True for _ in strategies]\n return strategies, do_rotations", "def setUp(self):\n super(TestPickingValidate, self).setUp()", "def setStrategy(self, value):\n return self._set(strategy=value)", "def configure_optimizers(self):\n allowed = list(OPTIM_LOOKUP.keys())\n if self.optimizer not in allowed:\n raise ValueError(\n f\"Illegal optimizer given. Got {self.optimizer}. Allowed: {allowed}.\"\n )\n\n allowed = list(SCHED_LOOKUP.keys())\n if self.scheduler not in allowed:\n raise ValueError(\n f\"Illegal scheduler given. Got {self.scheduler}. Allowed: {allowed}.\"\n )\n\n if self.optim_params is None:\n self.optim_params = {\n \"encoder\": {\"lr\": 0.00005, \"weight_decay\": 0.00005},\n \"decoder\": {\"lr\": 0.0005, \"weight_decay\": 0.0005},\n }\n\n params = adjust_optim_params(self.model, self.optim_params)\n optimizer = OPTIM_LOOKUP[self.optimizer](params)\n\n if self.lookahead:\n optimizer = OPTIM_LOOKUP[\"lookahead\"](optimizer, k=5, alpha=0.5)\n\n if self.scheduler_params is None:\n self.scheduler_params = {}\n\n scheduler = {\n \"scheduler\": SCHED_LOOKUP[self.scheduler](\n optimizer, **self.scheduler_params\n ),\n \"monitor\": \"val_loss\",\n \"interval\": \"epoch\",\n \"frequency\": 1,\n }\n\n return [optimizer], [scheduler]", "def setStrategy(self, strategy):\r\n if strategy in NNPlayer.LEGAL_STRATEGY:\r\n self.strategy = strategy\r\n return True\r\n return False", "def setup_provider(self):\n pass", "def test_weighted_strategy_pool(self):\n environment.set_value('STRATEGY_SELECTION_METHOD', 'multi_armed_bandit')\n strategy_pool = strategy_selection.generate_weighted_strategy_pool(\n strategy_list=strategy.LIBFUZZER_STRATEGY_LIST,\n use_generator=True,\n engine_name='libFuzzer')\n self.assertTrue(\n strategy_pool.do_strategy(strategy.RANDOM_MAX_LENGTH_STRATEGY))\n self.assertTrue(strategy_pool.do_strategy(strategy.VALUE_PROFILE_STRATEGY))\n self.assertFalse(\n strategy_pool.do_strategy(strategy.CORPUS_MUTATION_RADAMSA_STRATEGY))\n self.assertFalse(strategy_pool.do_strategy(strategy.FORK_STRATEGY))", "def setUp(self):\n test_helpers.patch_environ(self)\n test_helpers.patch(self, [\n 'clusterfuzz._internal.bot.fuzzers.engine_common.decide_with_probability'\n ])\n self.mock.decide_with_probability.return_value = True\n\n data = []\n\n strategy1 = data_types.FuzzStrategyProbability()\n strategy1.strategy_name = 'corpus_subset,'\n strategy1.probability = 1\n strategy1.engine = 'afl'\n data.append(strategy1)\n ndb.put_multi(data)\n\n distribution = fuzz_task.get_strategy_distribution_from_ndb()\n\n environment.set_value('USE_BANDIT_STRATEGY_SELECTION', True)\n environment.set_value('STRATEGY_SELECTION_DISTRIBUTION', distribution)", "def _further_validate_and_setup(self) -> None:\n\n # Make sure parameters make sense/are valid\n if len(self.validated['learners']) != len(self.validated['param_grids']):\n raise SchemaError(autos=None,\n errors='The lists of of 
learners and parameter '\n 'grids must be the same size.')\n if (self.validated['hashed_features'] is not None\n and self.validated['hashed_features'] == 0):\n self.validated['hashed_features'] = self._n_features_feature_hashing\n if self.validated['lognormal'] and self.validated['power_transform']:\n raise SchemaError(autos=None,\n errors='Both \"lognormal\" and \"power_transform\" '\n 'were set simultaneously.')\n if len(self.validated['learners']) != len(self.validated['param_grids']):\n raise SchemaError(autos=None,\n errors='The \"learners\" and \"param_grids\" '\n 'parameters were both set and the '\n 'lengths of the lists are unequal.')", "def _setManager(self, mgr: \"StrategyManager\") -> None:", "def test_multi_armed_bandit_strategy_pool(self):\n environment.set_value('STRATEGY_SELECTION_METHOD', 'default')\n strategy_selection.generate_weighted_strategy_pool(\n strategy_list=strategy.LIBFUZZER_STRATEGY_LIST,\n use_generator=True,\n engine_name='libFuzzer')\n environment.set_value('STRATEGY_SELECTION_METHOD', 'multi_armed_bandit')\n strategy_selection.generate_weighted_strategy_pool(\n strategy_list=strategy.LIBFUZZER_STRATEGY_LIST,\n use_generator=True,\n engine_name='libFuzzer')", "def setup_optimizers(self, *args, **kwargs):\n\n # self.optimizers.append(...)\n # self.loss.append(...)\n pass", "def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=True,\n ))\n do_rotations = [True for _ in strategies]\n return strategies, do_rotations", "def setup_validation(self, client, *args, **keyword_args):\n raise NotImplementedError(\"Please fix me.\")", "def setup_validation(self, client, *args, **keyword_args):\n raise NotImplementedError(\"Please fix me.\")", "def _TP_estimator_requirements(estimator):\n if estimator == 'Natural':\n do_DD = True\n do_DR = False\n do_RR = True\n elif estimator == 'Davis-Peebles':\n do_DD = True\n do_DR = True\n do_RR = False\n elif estimator == 'Hewett':\n do_DD = True\n do_DR = True\n do_RR = True\n elif estimator == 'Hamilton':\n do_DD = True\n do_DR = True\n do_RR = True\n elif estimator == 'Landy-Szalay':\n do_DD = True\n do_DR = True\n do_RR = True\n else:\n available_estimators = _list_estimators()\n if estimator not in available_estimators:\n msg = (\"Input `estimator` must be one of the following:{0}\".format(available_estimators))\n raise HalotoolsError(msg)\n\n return do_DD, do_DR, do_RR", "def setPoolingStrategy(self, strategy):\n if strategy == \"AVERAGE\":\n return self._set(poolingStrategy=strategy)\n elif strategy == \"SUM\":\n return self._set(poolingStrategy=strategy)\n else:\n return self._set(poolingStrategy=\"AVERAGE\")", "def setPoolingStrategy(self, strategy):\n if strategy == \"AVERAGE\":\n return self._set(poolingStrategy=strategy)\n elif strategy == \"SUM\":\n return self._set(poolingStrategy=strategy)\n else:\n return self._set(poolingStrategy=\"AVERAGE\")", "def __init__(self, designation):\n try:\n self._designation = designation.lower()\n parts = self._designation.split(\"/\")\n self.mutate: mutation.MutationStrategy = mutation.__strategies__[parts[0]](int(parts[1]))\n self.crossover: crossover.CrossoverStrategy = crossover.__strategies__[parts[2]]()\n if len(parts) >= 4:\n self.repair: repair.RepairStrategy = repair.__strategies__[parts[3]]()\n else:\n self.repair: 
repair.RepairStrategy = repair.clip()\n self._designation += \"/clip\"\n except AttributeError or KeyError or IndexError or ValueError:\n raise ValueError(f\"Invalid evolution strategy '{designation}'\")", "def revalue_strategies():\n\t\t# Clear old values out\n\t\tStrategy.clear_strategies()\n\t\tStrategy.calculate_strategies(Person.recent_memory, int(Person.get_no_of_instances()/2))", "def test_strategies(strategy):\n X = Vectorizer(strategy=strategy).fit_transform(X_text)\n assert X.shape == (10, 20)\n assert \"corpus_york\" in X", "def register_rules(enforcer):\n rules = POLICIES.list_rules()\n enforcer.register_defaults(rules)", "def metric_strategies(self, metric_strategies):\n\n self._metric_strategies = metric_strategies", "def __init__(\n self,\n async_strategy=\"impute\",\n impute_strategy=\"cl_min\",\n acq_fun=None,\n acq_fun_kwargs=None,\n acq_optimizer=\"lbfgs\",\n acq_optimizer_kwargs=None,\n **kwargs\n ):\n super().__init__(**kwargs)\n\n # validations\n\n # allowed combinations of async strategies and acquisition functions\n allowed_combinations = {\n \"impute\": {\n \"EI\": GaussianProcess_EI,\n \"LCB\": GaussianProcess_LCB,\n \"PI\": GaussianProcess_PI,\n },\n \"asy_ts\": {\"AsyTS\": AsyTS},\n }\n if async_strategy not in allowed_combinations.keys():\n raise ValueError(\n \"Expected async_strategy to be in {} with GP as surrogate, got {}\".format(\n list(allowed_combinations.keys()), async_strategy\n )\n )\n\n if async_strategy == \"impute\" and self.pruner:\n if not self.interim_results:\n raise ValueError(\n \"Optimizer GP with async strategy `impute` only supports Pruner with interim_results==True, got {}\".format(\n self.interim_results\n )\n )\n\n if acq_fun not in allowed_combinations[async_strategy] and acq_fun is not None:\n raise ValueError(\n \"Expected acq_fun to be in {} with GP as surrogate and {} as async_strategy, got {}\".format(\n list(allowed_combinations[async_strategy].keys()),\n async_strategy,\n acq_fun,\n )\n )\n\n # async_strategy\n self.async_strategy = async_strategy\n\n # configure acquisition function\n if acq_fun is None:\n # default acq_fun is the first in the dict\n acq_fun = list(allowed_combinations[async_strategy].keys())[0]\n self.acq_fun = allowed_combinations[self.async_strategy][acq_fun]()\n self.acq_func_kwargs = acq_fun_kwargs\n\n # configure acquisiton function optimizer\n allowed_acq_opt = [\"sampling\", \"lbfgs\"]\n if acq_optimizer not in allowed_acq_opt:\n raise ValueError(\n \"expected acq_optimizer to be in {}, got {}\".format(\n allowed_acq_opt, acq_optimizer\n )\n )\n self.acq_optimizer = acq_optimizer\n if acq_optimizer_kwargs is None:\n acq_optimizer_kwargs = dict()\n\n if self.async_strategy == \"asy_ts\":\n # default value is 100 and max value is 1000 for asy ts\n self.n_points = np.clip(acq_optimizer_kwargs.get(\"n_points\", 100), 10, 1000)\n else:\n self.n_points = acq_optimizer_kwargs.get(\"n_points\", 10000)\n self.n_restarts_optimizer = acq_optimizer_kwargs.get(\"n_restarts_optimizer\", 5)\n self.acq_optimizer_kwargs = acq_optimizer_kwargs\n\n # configure impute strategy\n if self.async_strategy == \"impute\":\n allowed_impute_strategies = [\"cl_min\", \"cl_max\", \"cl_mean\", \"kb\"]\n if impute_strategy not in allowed_impute_strategies:\n raise ValueError(\n \"expected impute_strategy to be in {}, got {}\".format(\n allowed_impute_strategies, impute_strategy\n )\n )\n self.impute_strategy = impute_strategy\n\n # estimator that has not been fit on any data.\n self.base_model = None\n\n if self.async_strategy == 
\"impute\":\n self._log(\"Impute Strategy: {}\".format(self.impute_strategy))", "def setSuggestionStrategy(self, value):\n if value == SuggestionStrategy.OCR:\n self.setBooleanOption(8, True)\n elif value == SuggestionStrategy.TYPO:\n self.setBooleanOption(8, False)\n else:\n raise VoikkoException(\"Invalid suggestion strategy\")", "def __init__(self, strategy=None):\n self.strategy = strategy if strategy is not None else GenericStrategyComponent()\n self.is_game_over = False\n self.is_kicked = False\n self.is_winner = False", "def setup(self, runner):\n msg = None\n try:\n msg = \"Failed to start protocol connection\"\n self.connect()\n\n msg = None\n\n for cls in self.implements:\n getattr(self, cls.name).setup()\n\n msg = \"Post-connection steps failed\"\n self.after_connect()\n except Exception:\n if msg is not None:\n self.logger.warning(msg)\n self.logger.warning(traceback.format_exc())\n raise", "def init_envs():\n myenv = env(\n Ndim=N_DIMS,\n lambda_over_dx=LAMBDA_OVER_DX,\n R_dt=R_DT,\n norm_Poisson=NORM_POISSON,\n Ngrid=N_GRID,\n Nhits=N_HITS,\n )\n if POLICY == -1:\n mymodel = reload_model(MODEL_PATH, inputshape=myenv.NN_input_shape)\n mypol = RLPolicy(\n env=myenv,\n model=mymodel,\n )\n else:\n mypol = HeuristicPolicy(\n env=myenv,\n policy=POLICY,\n steps_ahead=STEPS_AHEAD,\n )\n return myenv, mypol", "def generate():\n strategies = []\n strategies.extend(\n generate_meta_strategy_pair(\n GeometryV4Strategy,\n alpha=0.1))\n do_rotations = [True for _ in strategies]\n return strategies, do_rotations", "def setup(self, options=None, extractor=None):\n # Get our programmatic options\n self._util.use_options(options, extractor)\n\n # Overwrite non defaults in self.values with values from config\n self._util.use_config_file()", "def init_modes(self):\n self._verify_not_using_threaded_mpm()\n\n self._init_screenshot_mode()\n self._init_debug_mode()\n self._init_webapi_cors_header()\n self.init_theme()", "def generate():\n strategies, do_rotations = [], []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(\n CentrifugalBumblepuppy16h,\n mirroring=False))\n do_rotations.extend([False])\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n do_rotations.extend([True, True])\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n do_rotations.extend([True, True])\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n do_rotations.extend([True, True])\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def _setup(self):\n raise NotImplementedError()", "def setup_validation(self, client):\n raise NotImplementedError(\"Please fix me.\")", "def setup_validation(self, client):\n raise NotImplementedError(\"Please fix me.\")", "def conciliate_conflicts(supvisors, strategy, conflicts):\n if strategy == ConciliationStrategies.SENICIDE:\n instance = SenicideStrategy(supvisors)\n elif strategy == ConciliationStrategies.INFANTICIDE:\n instance = InfanticideStrategy(supvisors)\n elif strategy == ConciliationStrategies.USER:\n instance = UserStrategy(supvisors)\n elif strategy == ConciliationStrategies.STOP:\n instance = StopStrategy(supvisors)\n elif strategy == ConciliationStrategies.RESTART:\n instance = RestartStrategy(supvisors)\n elif strategy 
== ConciliationStrategies.RUNNING_FAILURE:\n instance = FailureStrategy(supvisors)\n # apply strategy to conflicts\n instance.conciliate(conflicts)", "def test_valid_estimator(strategy: str) -> None:\n mapie = MapieRegressor(estimator=DummyRegressor(), **STRATEGIES[strategy])\n mapie.fit(X_toy, y_toy)\n assert isinstance(mapie.single_estimator_, DummyRegressor)\n for estimator in mapie.estimators_:\n assert isinstance(estimator, DummyRegressor)", "def test_default_pool_deterministic(self):\n strategy_pool = strategy_selection.generate_default_strategy_pool(\n strategy_list=strategy.LIBFUZZER_STRATEGY_LIST, use_generator=True)\n\n self.assertTrue(\n strategy_pool.do_strategy(strategy.CORPUS_MUTATION_RADAMSA_STRATEGY))\n self.assertTrue(strategy_pool.do_strategy(strategy.CORPUS_SUBSET_STRATEGY))\n self.assertTrue(\n strategy_pool.do_strategy(strategy.RANDOM_MAX_LENGTH_STRATEGY))\n self.assertTrue(strategy_pool.do_strategy(strategy.VALUE_PROFILE_STRATEGY))\n self.assertTrue(strategy_pool.do_strategy(strategy.FORK_STRATEGY))", "def set_strategy(self, policy):\n self.suspicious = policy\n # The rationale is that if a gun policy is active the agent chose more often to use force as a initial strategy\n if self.suspicious:\n self.s_aggressor = random.choices(['Force', 'nForce'], [.7, .4])\n else:\n self.s_aggressor = random.choices(['Force', 'nForce'], [.14, .86])", "def setUp(self):\n self.validator = Validator()\n self.users = Users()", "def _set_up_pacman_algorithm_listings(\n self, algorithms, optional_algorithms, xml_paths, inputs,\n required_outputs):\n\n # deduce if the algorithms are internal or external\n algorithms_names = list(algorithms)\n\n # set up XML reader for standard PACMAN algorithms XML file reader\n # (used in decode_algorithm_data_objects function)\n xml_paths.append(os.path.join(\n os.path.dirname(operations.__file__),\n \"algorithms_metadata.xml\"))\n xml_paths.append(os.path.join(\n os.path.dirname(algorithm_reports.__file__),\n \"reports_metadata.xml\"))\n\n converter_xml_path = list()\n converter_xml_path.append(os.path.join(\n os.path.dirname(file_format_converters.__file__),\n \"converter_algorithms_metadata.xml\"))\n\n # decode the algorithms specs\n xml_decoder = ConvertAlgorithmsMetadata(xml_paths)\n algorithm_data_objects = xml_decoder.decode_algorithm_data_objects()\n xml_decoder = ConvertAlgorithmsMetadata(converter_xml_path)\n converter_algorithm_data_objects = \\\n xml_decoder.decode_algorithm_data_objects()\n\n # filter for just algorithms we want to use\n algorithm_data = self._get_algorithm_data(\n algorithms_names, algorithm_data_objects,\n converter_algorithm_data_objects)\n optional_algorithms_datas = self._get_algorithm_data(\n optional_algorithms, algorithm_data_objects,\n converter_algorithm_data_objects)\n optional_algorithms_datas.extend(\n converter_algorithm_data_objects.values())\n\n # sort_out_order_of_algorithms for execution\n self._sort_out_order_of_algorithms(\n inputs, required_outputs, algorithm_data,\n optional_algorithms_datas)", "def _get_validation_strategy(self):\n strat = {}\n self._validate_validation_strategy()\n if self.validation_strategy == \"auto\":\n if self._get_mode() == \"Explain\":\n strat = {\n \"validation_type\": \"split\",\n \"train_ratio\": 0.75,\n \"shuffle\": True,\n \"stratify\": True,\n }\n elif self._get_mode() == \"Perform\":\n strat = {\n \"validation_type\": \"kfold\",\n \"k_folds\": 5,\n \"shuffle\": True,\n \"stratify\": True,\n }\n elif self._get_mode() in [\"Compete\", \"Optuna\"]:\n strat = {\n 
\"validation_type\": \"kfold\",\n \"k_folds\": 10,\n \"shuffle\": True,\n \"stratify\": True,\n }\n if self._get_ml_task() == REGRESSION:\n if \"stratify\" in strat:\n # it's better to always check\n # before delete (trust me)\n del strat[\"stratify\"]\n return strat\n else:\n strat = deepcopy(self.validation_strategy)\n if self._get_ml_task() == REGRESSION:\n if \"stratify\" in strat:\n del strat[\"stratify\"]\n return strat", "def optimizer_setup(model, params):\n if params.optimizer == 'adam':\n if params.freeze_backbone:\n optimizer = optimizer_handler.layer_specific_adam(model, params)\n else:\n optimizer = optimizer_handler.plain_adam(model, params)\n elif params.optimizer == 'sgd':\n if params.freeze_backbone:\n optimizer = optimizer_handler.layer_specific_sgd(model, params)\n else:\n optimizer = optimizer_handler.plain_sgd(model, params)\n\n if params.zero_bn_bias_decay:\n optimizer = zero_wdcay_bn_bias(optimizer)\n\n return optimizer", "def _initial_setup(self, **train_kwargs):\n super(NetworkValidationBase, self)._initial_setup(**train_kwargs)", "def generate():\n strategies = []\n strategies.extend(\n generate_meta_strategy_pair(\n GeometryV4Strategy, mirroring=False,\n alpha=0.5))\n do_rotations = [False for _ in strategies]\n assert len(strategies) == 1\n return strategies, do_rotations", "def setup(self):\n log.msg(\"Fetching required net test inputs...\")\n for net_test_loader in self.netTestLoaders:\n yield self.fetchAndVerifyNetTestInput(net_test_loader)\n\n if self.bouncer:\n log.msg(\"Looking up test helpers...\")\n yield self.lookupTestHelpers()", "def _validate_user_module_and_set_functions(self):\n user_module_name = self._environment.module_name\n\n self._pre_model_fn = getattr(self._default_inference_handler, \"default_pre_model_fn\", None)\n self._model_warmup_fn = getattr(\n self._default_inference_handler, \"default_model_warmup_fn\", None\n )\n\n if find_spec(user_module_name) is not None:\n user_module = importlib.import_module(user_module_name)\n\n self._model_fn = getattr(\n user_module, \"model_fn\", self._default_inference_handler.default_model_fn\n )\n\n transform_fn = getattr(user_module, \"transform_fn\", None)\n input_fn = getattr(user_module, \"input_fn\", None)\n predict_fn = getattr(user_module, \"predict_fn\", None)\n output_fn = getattr(user_module, \"output_fn\", None)\n pre_model_fn = getattr(user_module, \"pre_model_fn\", None)\n model_warmup_fn = getattr(user_module, \"model_warmup_fn\", None)\n\n if transform_fn and (input_fn or predict_fn or output_fn):\n raise ValueError(\n \"Cannot use transform_fn implementation in conjunction with \"\n \"input_fn, predict_fn, and/or output_fn implementation\"\n )\n\n self._transform_fn = transform_fn or self._default_transform_fn\n self._input_fn = input_fn or self._default_inference_handler.default_input_fn\n self._predict_fn = predict_fn or self._default_inference_handler.default_predict_fn\n self._output_fn = output_fn or self._default_inference_handler.default_output_fn\n if pre_model_fn is not None:\n self._pre_model_fn = pre_model_fn\n if model_warmup_fn is not None:\n self._model_warmup_fn = model_warmup_fn\n else:\n self._model_fn = self._default_inference_handler.default_model_fn\n self._input_fn = self._default_inference_handler.default_input_fn\n self._predict_fn = self._default_inference_handler.default_predict_fn\n self._output_fn = self._default_inference_handler.default_output_fn\n\n self._transform_fn = self._default_transform_fn", "def __init__(self):\n super().__init__()\n 
TemplateEngineFactory.register_factory('Jinja2Engine', Jinja2Engine.Factory)\n\n step1 = PrepareAppConfTransfiguration()\n step2 = ConfReaderToContextTransfiguration()\n step3 = ValidateCollectionTags()\n step4 = AttributeChainedTransfiguration('mbean')\n step5 = AttributeChainedTransfiguration('connection')\n\n self.add(step1)\n self.add(step2)\n self.add(step3)\n self.add(step4)\n self.add(step5)", "def load_strategy_class(self):\n path2 = Path(__file__).parent.parent.joinpath(\"strategies\")\n self.load_strategy_class_from_folder(path2, \"vnpy.app.cta_strategy.strategies\")", "def placement_strategies(self) -> Optional[Sequence['outputs.ScheduleTargetEcsParametersPlacementStrategy']]:\n return pulumi.get(self, \"placement_strategies\")", "def run_experiments():\r\n result = eval_strategy_range(always_roll, 1, 10)\r\n print('Best always_roll strategy:', result)\r\n\r\n if True: # Change to True when ready to test make_comeback_strategy\r\n result = eval_strategy_range(make_comeback_strategy, 5, 15)\r\n print('Best comeback strategy:', result)\r\n\r\n if True: # Change to True when ready to test make_mean_strategy\r\n result = eval_strategy_range(make_mean_strategy, 1, 10)\r\n print('Best mean strategy:', result)\r\n\r\n \"*** You may add additional experiments here if you wish ***\"" ]
[ "0.6532167", "0.5933181", "0.55383205", "0.5475055", "0.5461148", "0.54593635", "0.54482967", "0.5417319", "0.5391583", "0.53656155", "0.53485143", "0.5343126", "0.5342159", "0.5331955", "0.5308945", "0.53017586", "0.5301568", "0.5298491", "0.5298491", "0.5286031", "0.526086", "0.5240757", "0.5238277", "0.52226776", "0.5219429", "0.521794", "0.5214315", "0.52093565", "0.51991594", "0.5181124", "0.5172547", "0.51602316", "0.5157701", "0.5152942", "0.51474756", "0.51428086", "0.513802", "0.5132524", "0.51266664", "0.51083106", "0.5098609", "0.50533384", "0.5035991", "0.50323147", "0.50317353", "0.50317353", "0.5015751", "0.5007389", "0.49577335", "0.49559167", "0.4950669", "0.49477118", "0.49462917", "0.49392638", "0.49369025", "0.49240926", "0.49224126", "0.49100912", "0.49090257", "0.4897357", "0.4892746", "0.48914117", "0.48833427", "0.48833427", "0.48628408", "0.48531505", "0.48531505", "0.48461133", "0.48411378", "0.48391932", "0.48383966", "0.48270187", "0.47967318", "0.4792663", "0.47814047", "0.4779441", "0.477826", "0.47678947", "0.476331", "0.4756154", "0.4752389", "0.47494006", "0.4748049", "0.4748049", "0.47395563", "0.4739179", "0.47373813", "0.47362402", "0.4735019", "0.47302952", "0.4728037", "0.47128522", "0.46905527", "0.46893427", "0.4685034", "0.46716264", "0.46463716", "0.46192038", "0.45999366", "0.45993096" ]
0.60849404
1
Target a value following the path 'self.path' inside the dictionary.
def _target(path: List[Any], dictionary: Dict[str, Any]) -> Any:
    if not path:
        return dictionary
    current = dictionary
    for key in path:
        try:
            current = current[key]
        except KeyError as error:
            path = " -> ".join(path)
            raise CertumException(f"The path '{path}' doesn't exist") from error
    return current
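A minimal usage sketch of the resolver above (the values, the typing imports, and the availability of CertumException are assumptions for illustration only):

from typing import Any, Dict, List

config = {"server": {"port": 8080}}
print(_target(["server", "port"], config))  # -> 8080
print(_target([], config))                  # an empty path returns the whole dictionary
# _target(["server", "host"], config) would raise
# CertumException("The path 'server -> host' doesn't exist")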
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_path(self, key, value):\n return set_path(self, key, self.from_obj(value))", "def set(self, path, value):\n pth = self._path[:]\n pth.extend(stringify_keys(path))\n set_nested(self._request.session, pth, value)\n # self._value = get_nested_default(self._dct, self._path)\n self.save()", "def __setitem__(self, path, value):\n\n path = self.__check_path__(path)\n\n # d - dict, p - path (keys sequence)\n def set_key(d, p):\n k = p[0]\n\n if len(p) == 1:\n d[k] = value\n else:\n if not isinstance(d.setdefault(k, self._factory()), dict):\n d[k] = self._factory()\n set_key(d[k], p[1:])\n\n set_key(self.__dict__, path)", "def set_by_path(data: Dict[str, T], path: Sequence[str], value: T):\n get_by_path(data, path[:-1])[path[-1]] = value", "def __set__(self, obj, val):\n try:\n self._resolve(val)\n except IOError, e:\n Parameterized(name=\"%s.%s\"%(obj.name,self._attrib_name)).warning('%s'%(e.args[0]))\n\n super(Path,self).__set__(obj,val)", "def __setattr__(self, key, value):\n if isinstance(value, DotDict) and key != '_parent':\n value.__dict__['_parent'] = weakref.proxy(self)\n super(DotDictWithAcquisition, self).__setattr__(key, value)", "def __getitem__(self, key):\n path = self.path\n if self.path_is_string:\n path = [path]\n return path[key]", "def setByPath(self, keys, value):\n self.getByPath(keys[:-1])[keys[-1]] = value", "def _set_by_path(tree, keys, value):\n _get_by_path(tree, keys[:-1])[keys[-1]] = value", "def _set_by_path(tree, keys, value):\n _get_by_path(tree, keys[:-1])[keys[-1]] = value", "def update(self, tree_path, value):\n\t\traise NotImplementedError", "def setPath(self, name, value):\n response = self.extendPath(name, value, True, True)\n return response", "def set_upward(self, key, value):\n context = self.dicts[-1]\n for d in reversed(self.dicts):\n if key in d:\n context = d\n break\n context[key] = value", "def _set_path(self):\n self.path = self._get_path()\n self.depth = self.get_depth()\n\n self.save()", "def path(self, path):\n self._path = path", "def __init__(self, path):\n for key, value in path.items():\n setattr(self, \"_%s\" % key, value)", "def test_utils_set_dict_value_from_path_updating_fields():\n dictionary = {\"foo\": {\"bar\": \"bar_value\"}}\n ralph_utils.set_dict_value_from_path(dictionary, [\"foo\", \"bar\"], \"baz\")\n assert dictionary == {\"foo\": {\"bar\": \"baz\"}}", "def _get(self, key, current_node):\n pass", "def __setitem__(self, key, value):\n self.tree[key] = value", "def __set__(self, instance, value):\n instance.doc[self.slug] = value", "def set_posting_and_dictionary_path(self, path):\n self.posting_and_dictionary_path = path", "def set_by_path(root, path, value):\n \n sub_data = root\n for key in path[:-1]:\n sub_data = sub_data[key]\n sub_data[path[-1]] = value", "def __get__(self, obj, objtype):\n raw_path = super(Path,self).__get__(obj,objtype)\n return self._resolve(raw_path)", "def test_utils_set_dict_value_from_path_creating_new_fields():\n dictionary = {}\n ralph_utils.set_dict_value_from_path(dictionary, [\"foo\", \"bar\"], \"baz\")\n assert dictionary == {\"foo\": {\"bar\": \"baz\"}}", "def src_subpath(self, val: str):\n self[\"src_subpath\"] = val", "def _get_value(match_entry: Dict, path0: str) -> any:\n if path0 is None:\n current_el = match_entry\n else:\n path = path0.split('/')\n current_el = match_entry\n for p in path:\n if current_el is None:\n break\n current_el = current_el.get(p)\n return current_el", "def update(self, value):\n orig = get_nested_default(self._request.session, self._path)\n 
orig.update(value)\n set_nested(self._request.session, self._path, orig)\n # self._value = get_nested_default(self._session, self._path)\n self.save()", "def __setattr__(self, key, value):\n if '_children' in self.__dict__ and\\\n key in self.__dict__['_children'] and\\\n isinstance(value, int) and\\\n hasattr(self.__dict__['_children'][key], 'value'):\n getattr(self, key).value = value\n else:\n super().__setattr__(key, value)", "def set_by_path(root, items, value):\n get_by_path(root, items[:-1])[items[-1]] = value", "def get_path(self, key):\n return get_path(self, key)", "def path(self, path):\n\n self._path = path", "def path(self, path):\n\n self._path = path", "def path(self, path):\n\n self._path = path", "def path(self, path):\n\n self._path = path", "def path(self, path):\n\n self._path = path", "def set_property(self, key, value):\n\n self.current_context[key] = value\n\n context_name = self.current_context['context']\n optimal_values = CONTEXTS[context_name]['optimal_values']\n\n if key in optimal_values.keys() and optimal_values[key] != value:\n self.current_context['optimal'] = False\n sep = '====='\n self.current_context[key] = '{}> {} <{}'.format(\n sep, self.current_context[key], sep)\n\n # Mark as non optimal the whole parent chain\n while True:\n if CONTEXTS[context_name]['parent'] is None:\n break\n\n parent = getattr(self, CONTEXTS[context_name]['parent'])\n parent['optimal'] = False\n context_name = parent['context']", "def test_set_with_deep_key_path_with_string():\n deep_key_path = 'deep.key.path'\n test_value = 'deep key path value'\n\n config.set(deep_key_path, test_value)\n assert isinstance(config.get('deep'), dict)\n assert config.get(deep_key_path) == test_value", "def _set_link(self, value, handler):\n self._mapping[value] = handler", "def assign(self, key, value):\n key_split = key.split('.')\n cur_dict = self\n for k in key_split[:-1]:\n try:\n cur_dict = cur_dict[k]\n except KeyError:\n cur_dict[k] = self.__class__() # so that derived classes\n # remain true to type\n cur_dict = cur_dict[k]\n cur_dict[key_split[-1]] = value", "def setByPathAndIndex(self, keys, index, value):\n self.getByPath(keys[:-1])[keys[-1]][index] = value", "def _put(self, key, value, current_node):\n pass", "def _setOverride(self, configDict, path, value, overrideDict):\n key = path[0]\n\n if len(path) == 1:\n overrideDict[key] = self._coerceOption(configDict, key, value)\n return\n\n if key in configDict:\n if not isinstance(configDict[key], dict):\n raise UsageError(\n \"Found intermediate path element that is not a dictionary\"\n )\n\n if key not in overrideDict:\n overrideDict[key] = {}\n\n self._setOverride(\n configDict[key], path[1:],\n value, overrideDict[key]\n )", "def target(self, value):\n self._target = value", "def path(self):\n ...", "def __getitem__(self, key: str) -> Any:\n\n # Make sure the definition is up-to-date\n self._set_definition(self.pyfiguration.definition)\n\n # Make sure the key exists in the definition\n keyDefinition = from_dot_notation(\n field=\".\".join([*self.parents, key]), obj=self.get_definition()\n )\n\n # Keep track of the keys that have been accessed\n if isinstance(self.accessStatus.get(key, None), bool):\n self.accessStatus[key] = True\n\n # Get the value from the store\n defaultValue = from_dot_notation(\n field=\".\".join([*self.parents, key]), obj=self.get_definition()\n ).get(\"default\", None)\n if defaultValue is None and \"required\" not in keyDefinition:\n defaultValue = {}\n value = self.store.get(self.__keytransform__(key), 
defaultValue)\n\n # Perform a predefined set of tests on the value\n self.check_value(self.__keytransform__(key), value)\n\n # Return the checked value\n return value", "def __getattr__(self, key):\n return self.sub.__getattribute__(key)", "def __set__(self, instance, value):\n instance._values[self.name] = self.process(value)", "def visit_dict(self, sydict):\n self.current.update(sydict)", "def _get_object(self, path):\n if path == \"/\":\n return self.target\n\n parts = path[1:].split(\"/\")\n last = self.target\n for part in parts:\n if type(last) == dict:\n last = last[part]\n else:\n last = getattr(last, \"get_\" + part)()\n return last", "def select_path(self):\r\n pass", "def __getitem__(self, key):\n key_split = key.split('.')\n last_index = len(key_split) - 1\n current = self\n for i, k in enumerate(key_split):\n try:\n current = getattr(current, k)\n except KeyError:\n if i == last_index:\n raise\n temp_dict = DotDictWithAcquisition()\n temp_dict.__dict__['_parent'] = weakref.proxy(current)\n current = temp_dict\n return current", "def lookup(self, key):", "def setitem_key_value(self):\n raise NotImplementedError", "def set_path_url(self, data):\n self._path_url = self._uni(data)", "def set_item(self, key, value):\n key, value = str(key), str(value)\n key = self.locate.match_context_key(key)\n replaced = self.selector.get(key, None)\n self.selector[key] = value\n return key, replaced", "def __setattr__(self, name, value):\n if name == 'source' or name == 'destination':\n # produce \"canonical\" form of a source / destination\n # FIXME: we need to handle arbitrary netmasks here\n if value is not None and value.endswith('/32'):\n value = value[:-3]\n elif name == 'goto' or name == 'jump':\n if value is not None and not isinstance(value, Target):\n value = Target(value)\n elif name == 'matches':\n if not isinstance(value, list):\n raise Exception(\"matches attribute requires a list\")\n self.__dict__[name] = value", "def get(self, key):", "def get(self, key):", "def putPath(self, path, pathname):\n self.paths[path] = pathname", "def device_path(self, value):\n self._device_path = value", "def visit_record(self, syrecord):\n for other_key, other_value in syrecord.items():\n try:\n getattr(self.current, other_key).update(other_value)\n except KeyError:\n setattr(self.current, other_key, other_value)", "def __setattr__(self, key, value):\n if key != 'json_data':\n self.get_data()[key] = value\n else:\n super(BaseJsonEncodableObject, self).__setattr__(key, value)", "def __setitem__(self,key,value):\n assert isinstance(key,int)\n if isinstance(value,str):\n super().__setitem__(key,Node(key,value))\n else:\n assert value.nodeid == key\n super().__setitem__(key,value)", "def __setitem__(self, key, value):\n while self is not None:\n if key in self._dict:\n self._dict[key] = value\n return\n else:\n self = self.parent\n raise KeyError(\"%s was not declared\" % key)", "def path(self, value: Union[str, Path]):\n if type(value) is str:\n self._path = Path(path=value)\n elif type(value) is Path:\n self._path = value\n elif value is None:\n self._path = None\n else:\n raise TypeError(\"Path must be of type 'str' or 'Path' ({} given)\".format(type(value)))", "def set(self, key, value):\n self.context.set(self.prefix+'.'+key, value)", "def has_path_to(self, v):\n\n return self._marked[v]", "def set_value(self, reference_path, value):\n\n if (reference_path.start_protocol is not None and\n reference_path.start_protocol != self.id):\n\n raise ValueError('The reference path does not target this 
protocol.')\n\n if reference_path.property_name is None or reference_path.property_name == '':\n raise ValueError('The reference path does specify a property to set.')\n\n if reference_path in self.provided_outputs:\n raise ValueError('Output values cannot be set by this method.')\n\n set_nested_attribute(self, reference_path.property_name, value)", "def path(self, paths):\n resolved = paths[0]\n try:\n data = self.story.resolve_context(paths[0])\n item = data[paths[0]]\n for path in paths[1:]:\n if isinstance(path, str):\n item = item[path]\n\n assert isinstance(path, dict)\n object_type = path.get(\"$OBJECT\")\n if object_type == \"range\":\n item = self.range(path[\"range\"], item)\n else:\n resolved = self.object(path)\n # Allow a namedtuple to use keys or index\n # to retrieve data.\n if TypeUtils.isnamedtuple(item) and isinstance(\n resolved, str\n ):\n item = getattr(item, resolved)\n else:\n item = item[resolved]\n return item\n except IndexError:\n raise StoryscriptRuntimeError(\n message=f\"List index out of bounds: {resolved}\"\n )\n except (KeyError, AttributeError):\n raise StoryscriptRuntimeError(\n message=f'Map does not contain the key \"{resolved}\". '\n f\"Use map.get(key: <key> default: <default value>) to \"\n f\"prevent an exception from being thrown. Additionally, you \"\n f\"may also use map.contains(key: <key>) to check if a key \"\n f\"exists in a map.\"\n )\n except TypeError:\n return None", "def path_value(self, **kwargs):\n s = \"\"\n show_meta = kwargs.get(\"show_meta\", self.SHOW_META)\n show_path = show_meta and kwargs.get(\"show_path\", self.SHOW_PATH)\n if show_path:\n call_info = self.reflect.info\n if call_info:\n s = \"({}:{})\".format(self._get_path(call_info['file']), call_info['line'])\n return s", "def _set_item_impl(self, key: Any, value: Any) -> None:\n from omegaconf.omegaconf import _maybe_wrap\n\n from .nodes import AnyNode, ValueNode\n\n if isinstance(value, Node):\n do_deepcopy = not self._get_flag(\"no_deepcopy_set_nodes\")\n if not do_deepcopy and isinstance(value, Container):\n # if value is from the same config, perform a deepcopy no matter what.\n if self._get_root() is value._get_root():\n do_deepcopy = True\n\n if do_deepcopy:\n value = copy.deepcopy(value)\n value._set_parent(None)\n\n try:\n old = value._key()\n value._set_key(key)\n self._validate_set(key, value)\n finally:\n value._set_key(old)\n else:\n self._validate_set(key, value)\n\n if self._get_flag(\"readonly\"):\n raise ReadonlyConfigError(\"Cannot change read-only config container\")\n\n input_config = isinstance(value, Container)\n target_node_ref = self._get_node(key)\n special_value = value is None or value == \"???\"\n\n input_node = isinstance(value, ValueNode)\n if isinstance(self.__dict__[\"_content\"], dict):\n target_node = key in self.__dict__[\"_content\"] and isinstance(\n target_node_ref, ValueNode\n )\n\n elif isinstance(self.__dict__[\"_content\"], list):\n target_node = isinstance(target_node_ref, ValueNode)\n # We use set_value if:\n # 1. Target node is a container and the value is MISSING or None\n # 2. Target node is a container and has an explicit ref_type\n # 3. 
If the target is a NodeValue then it should set his value.\n # Furthermore if it's an AnyNode it should wrap when the input is\n # a container and set when the input is an compatible type(primitive type).\n\n should_set_value = target_node_ref is not None and (\n (\n isinstance(target_node_ref, Container)\n and (special_value or target_node_ref._has_ref_type())\n )\n or (target_node and not isinstance(target_node_ref, AnyNode))\n or (isinstance(target_node_ref, AnyNode) and is_primitive_type(value))\n )\n\n def wrap(key: Any, val: Any) -> Node:\n is_optional = True\n if not is_structured_config(val):\n ref_type = self._metadata.element_type\n else:\n target = self._get_node(key)\n if target is None:\n if is_structured_config(val):\n ref_type = self._metadata.element_type\n else:\n is_optional = target._is_optional()\n ref_type = target._metadata.ref_type\n return _maybe_wrap(\n ref_type=ref_type,\n key=key,\n value=val,\n is_optional=is_optional,\n parent=self,\n )\n\n def assign(value_key: Any, val: ValueNode) -> None:\n assert val._get_parent() is None\n v = val\n v._set_parent(self)\n v._set_key(value_key)\n self.__dict__[\"_content\"][value_key] = v\n\n if input_node and target_node:\n # both nodes, replace existing node with new one\n assign(key, value)\n elif not input_node and target_node:\n # input is not node, can be primitive or config\n if should_set_value:\n self.__dict__[\"_content\"][key]._set_value(value)\n elif input_config:\n assign(key, value)\n else:\n self.__dict__[\"_content\"][key] = wrap(key, value)\n elif input_node and not target_node:\n # target must be config, replace target with input node\n assign(key, value)\n elif not input_node and not target_node:\n if should_set_value:\n self.__dict__[\"_content\"][key]._set_value(value)\n elif input_config:\n assign(key, value)\n else:\n self.__dict__[\"_content\"][key] = wrap(key, value)", "def setProperty(self, child, key, value):\n\n # First get the child's dictionary\n childDict = self.getInfoDict(child)\n if childDict:\n childDict[key] = value", "def __set_full_path_of_file(self, value):\n self.full_path_of_file = value", "def _update_value(self, value):\n super(SubOutputPlug, self)._update_value(value)\n for plug in self.connections:\n plug.value = value\n parent_value = self.parent_plug.value or {}\n parent_value[self.key] = value\n self.parent_plug.value = parent_value", "def __setattr__(self, name: str, value: Any) -> None:\n super().__setattr__(name, value)\n # update entry as well (to sync with CLI, etc. )\n if not name.startswith(\"_\") and name in self._entries:\n self._entries[name].value = value", "def save(self, **kwargs):\n if len(self.path) > 0:\n self.path = self.path.strip().rstrip()\n super(TargetPath, self).save(**kwargs)", "def path(self) -> Path:\n return self[0]", "def __fspath__(self):\n raise NotImplementedError", "def _visit(path, key, value):\n if path + (key,) == update_location:\n return (key, updated_value)\n return (key, value)", "def __setitem__(self, key, value):\n if '.' 
in key:\n self.assign(key, value)\n else:\n setattr(self, key, value)", "def log_paths(self, value):\n self._log_paths = value", "def _set(self, ikey, value):\n obj = self\n keys = ikey.split('.')\n for idx in range(0, len(keys)):\n key = keys[idx]\n if not obj.has_key(key):\n ckey = '.'.join(keys[idx:])\n nkey, nval = convert_dot_notation(ckey, value)\n if isinstance(obj, DotDict):\n super(DotDict, obj).__setitem__(nkey, nval)\n else:\n obj.__setitem__(nkey, nval)\n return\n if key != keys[-1]:\n try:\n obj = super(DotDict, obj).__getitem__(key)\n except:\n try:\n obj = obj[key]\n except:\n raise\n if not isinstance(obj, dict):\n msg = 'Cannot assign new value, internal obj is not dict'\n raise Exception(msg)\n if isinstance(obj, DotDict):\n super(DotDict, obj).__setitem__(key, value)\n else:\n obj.__setitem__(key, value)", "def __getitem__(self, key):\n\n if '.' in key:\n path = key.split('.', 1)\n return self.parser.get(path[0], path[1])\n else:\n return self.parser.defaults()[key]", "def _raw(self,key):\n return self.__child[key]", "def path(self, x):\n yield self\n yield from self.next(x).path(x)", "def __getitem__(self, path):\n if path in self._BRANCHES:\n return self._BRANCHES[path](self)\n resource = self.__load__(path)\n if resource is None:\n raise KeyError\n if isinstance(resource, dict):\n return self._child_from_dict(path, resource)\n return resource", "def __setitem__(self, key, value):\n self.set_attribute(key, value)", "def unglom(\r\n d: T_StrAnyMapping, path: str, value: typing.Any\r\n) -> T_StrAnyMapping:\r\n try:\r\n return glom.assign(d, path, value)\r\n except KeyError:\r\n parent, child = path.rsplit(\".\", 1)\r\n return unglom(d, parent, {child: value})", "def setPath(self, path):\n if self._path != path:\n self._path = path\n self.__update_preview()", "def default_handler(self, elem, attr, url, pos):\n obj = self._base_handler(elem, attr, url, pos)\n\n if obj is None:\n return\n\n if attr is None:\n new = elem.text[:pos] + obj.rel_path + elem.text[len(url) + pos:]\n elem.text = new\n else:\n cur = elem.get(attr)\n if not pos and len(cur) == len(url):\n new = obj.rel_path # most common case\n else:\n new = cur[:pos] + obj.rel_path + cur[pos + len(url):]\n\n elem.set(attr, new)\n LOGGER.info(\"Remapped url of the file: %s to the path: %s \" % (url, obj.rel_path))\n self._urlMap[url] = obj.rel_path\n return obj", "def _set_direct(self, option, value):\n splitvals = option.split('/')\n section, key = \"/\".join(splitvals[:-1]), splitvals[-1]\n\n try:\n self.set(section, key, value)\n except NoSectionError:\n self.add_section(section)\n self.set(section, key, value)", "def set(self, key, value):\n #try to lock the tree. 
If we succeed make sure\n #we dont lose updates from any other process\n if self._storage.lock():\n self._refresh_tree_ref()\n #get current top-level node and make a value-ref\n node = self._follow(self._tree_ref)\n value_ref = ValueRef(value)\n #insert and get new tree ref\n self._tree_ref = self._insert(node, key, value_ref)\n self._tree_ref = self._blacken(self._follow(self._tree_ref))", "def _adjust_path_values(self, variable_name: str, value: str) -> str:\n if not self._file_path:\n return value\n for token in FileConfig.PATH_TOKEN:\n if token in variable_name:\n config_file_dir = os.path.dirname(self._file_path)\n resolved_path = os.path.join(config_file_dir, value)\n value = os.path.realpath(resolved_path)\n break\n return value", "def get_path(self):\n return self.path", "def setOverride(cls, configDict, path, value, overrideDict):\n key = path[0]\n\n if len(path) == 1:\n overrideDict[key] = cls.coerceOption(configDict, key, value)\n return\n\n if key in configDict:\n if not isinstance(configDict[key], dict):\n raise UsageError(\n \"Found intermediate path element that is not a dictionary\"\n )\n\n if key not in overrideDict:\n overrideDict[key] = {}\n\n cls.setOverride(\n configDict[key], path[1:],\n value, overrideDict[key]\n )", "def _set_key(cls, spec, value):\n spec[cls.KEY] = value\n if cls.REF:\n spec[\"ref\"] = value", "def pathfor( name, **matchdict ) :", "def macros_subpath(self, val: str):\n self[\"macros_subpath\"] = val", "def __init__(self, key, value, handler, getter, contains):\n self._key = key\n self._mapping = {}\n self._getter = getter\n self._contains = contains\n self._set_link(value, handler)", "def __fspath__(self):\n return str(self)" ]
[ "0.66520655", "0.618119", "0.6123845", "0.60922164", "0.60772496", "0.59788", "0.59782517", "0.595644", "0.5857021", "0.5857021", "0.5855367", "0.58048785", "0.57281256", "0.5721013", "0.5697208", "0.5649894", "0.5646482", "0.5641645", "0.564031", "0.5637099", "0.56196165", "0.55678004", "0.5548479", "0.55128706", "0.54988265", "0.54970217", "0.5490061", "0.5485262", "0.5476167", "0.54734516", "0.54555494", "0.54555494", "0.54555494", "0.54555494", "0.54555494", "0.5454027", "0.5446578", "0.54307336", "0.54279613", "0.5426114", "0.5425198", "0.54142284", "0.5408329", "0.54020655", "0.53571856", "0.53532696", "0.53502864", "0.53429735", "0.53377134", "0.53307533", "0.5325991", "0.53252053", "0.5318216", "0.5308419", "0.5302779", "0.53005123", "0.52761996", "0.52761996", "0.5244654", "0.523949", "0.5229021", "0.5227051", "0.5215732", "0.5208098", "0.5205562", "0.52004683", "0.5192809", "0.5189667", "0.5187881", "0.5186089", "0.51844245", "0.51826113", "0.51444644", "0.5144449", "0.514055", "0.5137691", "0.51308155", "0.51306355", "0.51254714", "0.51252645", "0.51145905", "0.51144785", "0.51137316", "0.5101756", "0.50928515", "0.50872767", "0.50847125", "0.50812286", "0.50811225", "0.50744265", "0.5072945", "0.5070972", "0.5064766", "0.50644034", "0.5059112", "0.505624", "0.5042363", "0.5041734", "0.5039493", "0.5035363" ]
0.5476351
28
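The negatives gathered in the record that ends here (DotDict._set, glom.assign/unglom, OmegaConf's _set_item_impl, set_nested_attribute and the like) are all variants of one pattern: assigning a value into a nested mapping addressed by a dotted path. As a minimal, hedged sketch of that pattern — written for illustration here, not taken from any of the quoted libraries — the core is a loop that descends the path and creates missing levels on the way:

def set_by_path(d: dict, path: str, value) -> None:
    # Walk dot-separated keys, creating intermediate dicts as needed,
    # then bind the final key to the value.
    keys = path.split(".")
    for key in keys[:-1]:
        d = d.setdefault(key, {})
    d[keys[-1]] = value

cfg = {}
set_by_path(cfg, "db.connection.timeout", 30)
print(cfg)   # {'db': {'connection': {'timeout': 30}}}

The quoted library snippets differ mainly in what they do when an intermediate node already exists and is not a mapping (raise, wrap, or overwrite); this sketch simply assumes each level is a plain dict.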
Records the vote note, we drop down and need to commit this transaction manually since we need to read, compute, and then write a new value. This will not work with mysql ISAM tables, so if you are using mysql, it is highly recommended to change this table to InnoDB to support transactions using
def record_vote(request): result = "success" try: rating, created = Rating.objects.get_or_create(key=request.POST['id']) key = request.POST['id'] ip = request.META['REMOTE_ADDR'] event, newevent = RatingEvent.objects.get_or_create(key=key,ip=ip) if not newevent: event.is_changing = True event.old_value = event.value event.value = int(request.POST['vote']) rating.add_rating(event) rating.save() event.save() result = "%s/5 rating ( %s votes)" % (rating.avg_rating, rating.total_votes) except: transaction.rollback() result = 'error' else: transaction.commit() return HttpResponse(result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self)->None:\n database.cursor.execute(\"INSERT INTO votes(question,user_id,value) VALUES(%s,%s,%s) RETURNING id\", (\n self.question,\n self.user,\n self.value\n ))\n super().save()", "def record_vote(self, obj, user, vote):\r\n if vote not in (+1, 0, -1):\r\n raise ValueError('Invalid vote (must be +1/0/-1)')\r\n ctype = ContentType.objects.get_for_model(obj)\r\n try:\r\n v = self.get(user=user, content_type=ctype,\r\n object_id=obj._get_pk_val())\r\n if vote == 0:\r\n v.delete()\r\n else:\r\n v.vote = vote\r\n v.save()\r\n except models.ObjectDoesNotExist:\r\n if vote != 0:\r\n self.create(user=user, content_type=ctype,\r\n object_id=obj._get_pk_val(), vote=vote)", "def record_vote(self, obj, vote, user):\n if vote not in (+1, 0, -1):\n raise ValueError('Invalid vote (must be +1/0/-1)')\n content_type = ContentType.objects.get_for_model(obj)\n # First, try to fetch the instance of this row from DB\n # If that does not exist, then it is the first time we're creating it\n # If it does, then just update the previous one\n try:\n vote_obj = self.get(voter=user, content_type=content_type, object_id=obj._get_pk_val())\n if vote == 0 and not ZERO_VOTES_ALLOWED:\n vote_obj.delete()\n else:\n vote_obj.vote = vote\n vote_obj.save()\n\n except ObjectDoesNotExist:\n #This is the first time we're creating it\n try:\n if not ZERO_VOTES_ALLOWED and vote == 0:\n # This shouldn't be happening actually\n return\n vote_obj = self.create(voter=user, content_type=content_type, object_id=obj._get_pk_val(), vote=vote)\n except:\n print(( '{file}: something went wrong in creating a vote object at {line}'.format(file=str('__FILE__'), line=str('__LINE__'))))\n raise ObjectDoesNotExist\n\n return vote_obj", "def create_vote(self):\n con = psycopg2.connect(**self.config)\n cur = con.cursor(cursor_factory=RealDictCursor)\n try:\n query = \"INSERT INTO votes(user_id, answer_id, vote) VALUES(%s, %s, %s)\"\n cur.execute(query, (self.user_id, self.answer_id, self.vote_value))\n con.commit()\n except Exception as e:\n print(e)\n con.close()\n return False\n return True", "def tpc_vote(self, transaction):\n raise NotImplementedError", "def record_vote_simple(self, obj, user, vote):#renamed from original record_vote\n if vote not in (+1, 0, -1):\n raise ValueError('Invalid vote (must be +1/0/-1)')\n ctype = ContentType.objects.get_for_model(obj)\n try:\n v = self.get(user=user, content_type=ctype,\n object_id=obj._get_pk_val())\n if vote == 0:\n v.delete()\n else:\n v.vote = vote\n v.save()\n except models.ObjectDoesNotExist:\n if vote != 0:\n self.create(user=user, content_type=ctype,\n object_id=obj._get_pk_val(), vote=vote)", "def update(self)->None:\n database.cursor.execute(\"UPDATE votes SET value = %s WHERE id = %s\", (\n self.value,\n self.id\n ))\n database.connection.commit()", "def votePost(votePostEvent):\n userID = votePostEvent[\"data\"][\"user_id\"]\n postID = votePostEvent[\"data\"][\"post_id\"]\n vote = int(votePostEvent[\"data\"][\"vote\"])\n query = ('SELECT * FROM vote WHERE post_id = \\\"{}\\\" AND user_id = \\\"{}\\\"'.format(postID, userID))\n with conn.cursor() as cur:\n affectedRow = cur.execute(query)\n if affectedRow > 0:\n row = cur.fetchone()\n if vote > 0 and not row[2]:\n query = (\n 'UPDATE vote SET upvote = true, downvote = false WHERE post_id = \\\"{}\\\" AND user_id = \\\"{}\\\"'.format(\n postID, userID))\n cur.execute(query)\n query = 'UPDATE post SET upvote = upvote+1, downvote = downvote-1 WHERE post_id = \\\"{}\\\"'.format(postID)\n cur.execute(query)\n elif vote < 0 and 
not row[3]:\n query = (\n 'UPDATE vote SET upvote = false, downvote = true WHERE post_id = \\\"{}\\\" AND user_id = \\\"{}\\\"'.format(\n postID, userID))\n cur.execute(query)\n query = 'UPDATE post SET upvote = upvote-1, downvote = downvote+1 WHERE post_id = \\\"{}\\\"'.format(postID)\n cur.execute(query)\n else:\n if vote > 0:\n query = (\n 'INSERT INTO vote (user_id, post_id, upvote, downvote) VALUES ( \\\"{}\\\", \\\"{}\\\", true, false)'.format(\n userID, postID))\n cur.execute(query)\n query = 'UPDATE post SET upvote = upvote+1 WHERE post_id = \\\"{}\\\"'.format(postID)\n cur.execute(query)\n else:\n query = (\n 'INSERT INTO vote (user_id, post_id, upvote, downvote) VALUES ( \\\"{}\\\", \\\"{}\\\", false, true)'.format(\n userID, postID))\n cur.execute(query)\n query = 'UPDATE post SET downvote = downvote+1 WHERE post_id = \\\"{}\\\"'.format(postID)\n cur.execute(query)\n conn.commit()", "def update_vote(self):\n if not self.answer_id:\n return False\n try:\n con = psycopg2.connect(**self.config)\n cur = con.cursor(cursor_factory=RealDictCursor)\n query = \"UPDATE votes SET vote=%s WHERE answer_id=%s AND user_id=%s\"\n cur.execute(query, (self.vote_value, self.answer_id, self.user_id))\n con.commit()\n except Exception as e:\n print(e)\n con.close()\n return False\n return True", "def upsert_bill_votes(self):\n\n return self.execute(\"\"\"\n INSERT INTO bill_votes\n (\n bill_type\n , chamber\n , congressional_session\n , vote_dttm\n , vote_cnt\n , passage_requirement\n , result\n , congress\n , source_url\n , updated_dttm\n , vote_id\n , yea_vote_cnt\n , nay_vote_cnt\n , abstain_vote_cnt\n , present_vote_cnt\n ) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n ON CONFLICT (vote_id) DO NOTHING\"\"\", self.insert_tuple)", "def __write_note(self, handle, nbr):\n try:\n note = self.database.get_note_from_handle(handle)\n self.__write_row(nbr, handle, note)\n except:\n note = \"NOT FOUND\"\n self.__write_row(nbr, handle, note)", "def migrate(cls)->None:\n database.cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS votes(\n id serial PRIMARY KEY,\n question integer,\n user_id integer,\n value integer\n )\"\"\")\n database.connection.commit()", "def vote(self):\n if self.vote_exists():\n return self.update_vote()\n return self.create_vote()", "def toggle_vote(self, user, value):\n try:\n v = Vote.objects.get(user=user, content=self)\n except Vote.DoesNotExist:\n Vote.objects.create(user=user, content=self, value=value)\n else:\n if v.value == value:\n v.delete()\n else:\n v.value = value\n v.save(update_fields=['value'])\n\n self.up = self.votes.count_upvotes()\n self.down = self.votes.count_downvotes()\n self.set_points()\n self.set_timepoints()\n self.save(update_fields=['up', 'down', 'points', 'timepoints'])", "def journal_write(session, k, v):\n entry = models.VppEtcdJournal(k=k, v=v)\n session.add(entry)\n session.flush()", "def test_upvote_modifies_comment_score(self):\n comment = Comment.objects.get(body=\"987XYZ\")\n self.assertEqual(comment.score, DEFAULT_SCORE)\n vote = Vote.create(comment=comment, value=1, voter=self.user)\n comment = Comment.objects.get(body=\"987XYZ\")\n self.assertEqual(comment.score, DEFAULT_SCORE + 1)", "def write_note():\n\n title_note = request.form.get(\"title\")\n note = request.form.get(\"note\")\n\n date_string = datetime.today().strftime('%Y-%m-%d')\n diary = Note(user_id=session[\"user_id\"],title_note = title_note, note_created=date_string, note=note)\n\n db.session.add(diary)\n db.session.commit()\n \n return \"note added\"", "def 
test_persistence_in_db(self) -> None:\n self.clear_votes()\n QuestionVote.objects.create(\n vote=1,\n question=self.question,\n user=self.user,\n )\n num_votes = QuestionVote.objects.all().count()\n self.assertEqual(num_votes, 1)", "def addProposalVote(user_id, rc_id, vote):\n\n db = getDB()\n proposal = db.proposals.find_one({\"rc_id\": rc_id})\n db.proposal_votes.update(\n {\"user_id\": user_id, \"proposal_id\": proposal[\"_id\"]},\n {\"user_id\": user_id, \"yes_vote\": vote == \"yes\", \"proposal_id\": proposal[\"_id\"]},\n upsert=True,\n )", "def save(self)->None:\n database.cursor.execute(\"INSERT INTO questions(created_date,created_by,meetup,title,body,votes,upvotes,downvotes) VALUES(%s,%s,%s,%s,%s,%s,%s,%s) RETURNING id\", (\n self.created_on,\n self.created_by,\n self.meet_up,\n self.title,\n self.body,\n self.votes,\n self.upvotes,\n self.downvotes\n ))\n super().save()", "def add(self, score, user, ip_address, cookies={}, commit=True):\n try:\n score = int(score)\n except (ValueError, TypeError):\n raise InvalidRating(\"%s is not a valid choice for %s\" % (score, self.field.name))\n \n delete = (score == 0)\n if delete and not self.field.allow_delete:\n raise CannotDeleteVote(\"you are not allowed to delete votes for %s\" % (self.field.name,))\n # ... you're also can't delete your vote if you haven't permissions to change it. I leave this case for CannotChangeVote\n \n if score < 0 or score > self.field.range:\n raise InvalidRating(\"%s is not a valid choice for %s\" % (score, self.field.name))\n\n is_anonymous = (user is None or not user.is_authenticated())\n if is_anonymous and not self.field.allow_anonymous:\n raise AuthRequired(\"user must be a user, not '%r'\" % (user,))\n \n if is_anonymous:\n user = None\n \n defaults = dict(\n score = score,\n ip_address = ip_address,\n )\n \n kwargs = dict(\n content_type = self.get_content_type(),\n object_id = self.instance.pk,\n key = self.field.key,\n user = user,\n )\n if not user:\n kwargs['ip_address'] = ip_address\n \n use_cookies = (self.field.allow_anonymous and self.field.use_cookies)\n if use_cookies:\n defaults['cookie'] = now().strftime('%Y%m%d%H%M%S%f') # -> md5_hexdigest?\n # TODO: move 'vote-%d.%d.%s' to settings or something\n cookie_name = 'vote-%d.%d.%s' % (kwargs['content_type'].pk, kwargs['object_id'], kwargs['key'][:6],) # -> md5_hexdigest?\n cookie = cookies.get(cookie_name) # try to get existent cookie value\n if not cookie:\n kwargs['cookie__isnull'] = True\n kwargs['cookie'] = cookie\n\n try:\n rating, created = Vote.objects.get(**kwargs), False\n except Vote.DoesNotExist:\n if delete:\n raise CannotDeleteVote(\"attempt to find and delete your vote for %s is failed\" % (self.field.name,))\n # print \"RATINGS_VOTES_PER_IP: \"\n # print getattr(settings, 'RATINGS_VOTES_PER_IP', RATINGS_VOTES_PER_IP)\n if getattr(settings, 'RATINGS_VOTES_PER_IP', RATINGS_VOTES_PER_IP):\n num_votes = Vote.objects.filter(\n content_type=kwargs['content_type'],\n object_id=kwargs['object_id'],\n key=kwargs['key'],\n ip_address=ip_address,\n ).count()\n if num_votes >= getattr(settings, 'RATINGS_VOTES_PER_IP', RATINGS_VOTES_PER_IP):\n raise Exception(\"Numero Maximo de votos por ip\")\n kwargs.update(defaults)\n if use_cookies:\n # record with specified cookie was not found ...\n cookie = defaults['cookie'] # ... thus we need to replace old cookie (if presented) with new one\n kwargs.pop('cookie__isnull', '') # ... 
and remove 'cookie__isnull' (if presented) from .create()'s **kwargs\n rating, created = Vote.objects.create(**kwargs), True\n \n has_changed = False\n if not created:\n if self.field.can_change_vote:\n has_changed = True\n self.score -= rating.score\n # you can delete your vote only if you have permission to change your vote\n if not delete:\n rating.score = score\n rating.save()\n else:\n self.votes -= 1\n rating.delete()\n else:\n raise CannotChangeVote()\n else:\n has_changed = True\n self.votes += 1\n if has_changed:\n if not delete:\n self.score += rating.score\n if commit:\n self.instance.save()\n #setattr(self.instance, self.field.name, Rating(score=self.score, votes=self.votes))\n \n defaults = dict(\n score = self.score,\n votes = self.votes,\n )\n \n kwargs = dict(\n content_type = self.get_content_type(),\n object_id = self.instance.pk,\n key = self.field.key,\n )\n \n try:\n score, created = Score.objects.get(**kwargs), False\n except Score.DoesNotExist:\n kwargs.update(defaults)\n score, created = Score.objects.create(**kwargs), True\n \n if not created:\n score.__dict__.update(defaults)\n score.save()\n \n # return value\n adds = {}\n if use_cookies:\n adds['cookie_name'] = cookie_name\n adds['cookie'] = cookie\n if delete:\n adds['deleted'] = True\n return adds", "def add_vote():\n \n\n comment_id = request.form.get(\"comment_id\")\n voted_item = request.form.get(\"voted_item\")\n\n\n comment = Comment.query.get(int(comment_id))\n \n \n vote_check = Vote.query.filter(Vote.comment_id == int(comment_id), Vote.user_id == session['user_id']).first()\n if vote_check:\n db.session.delete(vote_check)\n db.session.commit()\n else:\n vote_added = Vote(user_id = session['user_id'], comment_id = int(comment_id), up_vote = True)\n db.session.add(vote_added)\n db.session.commit()\n\n \n \n result = {'vote': comment.vote_count(), \"comment_id\": comment_id}\n return jsonify(result)", "def _force_vote(self, user, value):\n previous = 0\n if value == 0:\n # Delete any previous vote object\n for v in Vote.objects.filter(user=user, content=self):\n previous = v.value\n v.delete()\n else:\n # Create or change vote object\n v, created = Vote.objects.get_or_create(user=user, content=self)\n previous = v.value\n v.value = value\n v.save(update_fields=['value'])\n return (previous-value)*(-1)", "def store(self) -> None:\n con, c = db.connect()\n if not db.exists('SELECT * FROM answers WHERE id = ?', self.id, con=con):\n c.execute('INSERT INTO answers VALUES (?, ?, ?, ?, ?, ?, ?)', (self.id, self.answer, \n self.likes, self.created, self.tell, self.user.id, self.parent_id,))\n c.execute('UPDATE answers SET answer=?, likes=?, created=?, tell=?, user=? 
'+\\\n 'WHERE id = ?', (self.answer, self.likes, self.created, self.tell, \n self.user.id, self.id,))\n db.close(con)", "def test_vote_created_for_comment(self):\n comment = Comment.objects.get(body=\"987XYZ\")\n vote = Vote.create(comment=comment, value=1, voter=self.user)\n vote.save()\n self.assertIsNotNone(vote)\n self.assertIsNotNone(vote.comment)\n self.assertIsNotNone(vote.topic_post)\n self.assertIsNotNone(vote.voter)\n self.assertIs(vote.comment, comment)\n self.assertIs(vote.topic_post, comment.post)\n self.assertIs(vote.voter, self.user)", "def add_vote(self, source, target):\n\n if self.votes.get(source, None)==target:\n return # Don't need to change a thing.\n self.votes[source] = target\n\n qty = self.voted.get(target, 0)\n self.voted[target] = qty + 1\n pass", "def test_upvote_modifies_post_score(self):\n post = Post.objects.get(body=\"123ABC Body\")\n self.assertEqual(post.score, DEFAULT_SCORE)\n vote = Vote.create(post=post, value=1, voter=self.user)\n post = Post.objects.get(body=\"123ABC Body\")\n self.assertEqual(post.score, DEFAULT_SCORE + 1)", "def test_changevotes(self):\r\n request = RequestFactory()\r\n post = request.post(self.url, {'field': 'hints',\r\n 'op': 'change votes',\r\n 1: [self.problem_id.to_deprecated_string(), '1.0', '1', 5]})\r\n view.change_votes(post, self.course_id, 'hints')\r\n problem_hints = XModuleUserStateSummaryField.objects.get(field_name='hints', usage_id=self.problem_id).value\r\n # hints[answer][hint_pk (string)] = [hint text, vote count]\r\n print json.loads(problem_hints)['1.0']['1']\r\n self.assertTrue(json.loads(problem_hints)['1.0']['1'][1] == 5)", "def vote(self, option: str) -> None:\n if option not in ['yes', 'no']:\n revert(f'Option must be one of either \"yes\" or \"no\".')\n token_score = self.create_interface_score(self._token_score.get(), TokenInterface)\n address = self.tx.origin\n if address not in self._voted and token_score.balanceOf(address) == 0:\n revert(f'You must either own or be a previous owner of TAP tokens in order to cast a vote.')\n self._vote[str(address)] = option\n if address not in self._voted:\n self._voted.put(address)\n message = f\"Recorded vote of {str(address)}\"\n self.Vote(self.msg.sender, option, message)\n else:\n message = f\"{str(address)} updated vote to {option}\"\n self.Vote(address, option, message)\n if not self.vote_result():\n vote_msg = \"Overall Vote remains a 'No'.\"\n self.Vote(address, option, vote_msg)\n else:\n # In case the votes is passed, treasury is dissolved by sending all the balance to distribution contract.\n # Distribution contract will then distribute 80% to tap holders and 20% to founders.\n self._open_treasury.set(True)\n self._excess_to_distribute.set(self.icx.get_balance(self.address))\n self.__check_for_dividends()\n vote_msg = \"Vote passed! 
Treasury balance forwarded to distribution contract.\"\n self.Vote(address, option, vote_msg)\n self._treasury_min.set(0)", "def quickSqlWrite(self,s,v):\n self.conn()\n self.execute(s,v)\n self.commit()\n self.close()", "def add_vote():\n article_name = request.json.get('article_name','')\n username = request.json.get('username','')\n user = Participant.query.find(Participant.username == username).first_or_404()\n\n article = Article.query.filter(Article.name == article_name).first_or_404()\n group = user.group \n stack_entry = Stack.query.filter(Stack.article == exist_article & Stack.group == group).first_or_404()\n \n vote = Vote(voter = user, article = stack_entry)\n db.session.add(vote)\n db.session.commit()\n\n return f\"Vote added for {user} on {article}\", 201", "def save_record(rec,err_list=[]):\n if validate_form(rec):\n # Set the sign of qty based on transaction type\n if not rec.qty:\n rec.qty = 0\n else:\n rec.qty = abs(rec.qty)\n \n if rec.trx_type.lower() == \"remove\" and rec.qty != 0:\n rec.qty = rec.qty * -1\n \n Transaction(g.db).save(rec)\n try:\n g.db.commit()\n #Save the date and comment to session\n session['last_trx'] = {\"created\":rec.created,\"note\":rec.note}\n return True\n \n except Exception as e:\n err_list.append(printException('Error attempting to save Transaction record',str(e)))\n \n g.db.rollback()\n return False", "def Save(self) -> None:\n self.__conn.commit()", "def test_vote_created_for_post(self):\n post = Post.objects.get(body=\"123ABC Body\")\n vote = Vote.create(post=post, value=1, voter=self.user)\n vote.save()\n self.assertIsNotNone(vote)\n self.assertIsNotNone(vote.post)\n self.assertIsNotNone(vote.topic_post)\n self.assertIsNotNone(vote.voter)\n self.assertIs(vote.post, post)\n self.assertIs(vote.topic_post, post)\n self.assertIs(vote.voter, self.user)", "async def do_post(self, vote_plus, vote_minus,\n UPDATE_PLUS, UPDATE_MINUS, NO_UPDATE):\n tuple_plus = await self.db.fetchrow((\n \"select (data) from secret where data->>0 = '{}'\"\n ).format(vote_plus))\n tuple_minus = await self.db.fetchrow((\n \"select (data) from secret where data->>0 = '{}'\"\n ).format(vote_minus))\n id_plus = json.loads(tuple_plus[0])[2]\n id_minus = json.loads(tuple_minus[0])[2]\n if vote_plus and vote_minus:\n await self.db.execute((\n \"delete from secret where data->>0 = '{}'\"\n ).format(vote_plus))\n await self.db.execute((\n \"delete from secret where data->>0 = '{}'\"\n ).format(vote_minus))\n\n if id_plus and id_minus:\n await self.update_rating(id_plus, +1)\n await self.update_rating(id_minus, -1)\n\n await self.update_votes(id_plus, UPDATE_PLUS, NO_UPDATE)\n await self.update_votes(id_minus, NO_UPDATE, UPDATE_MINUS)\n\n await self.update_stats(0, NO_UPDATE, UPDATE_PLUS)", "def persist_record(conn,data,tb_name):\n\tquery_param\t\t= tuple(list(map(lambda k : data[k],col_list[tb_name])))\n\texecute_query(conn,query_strings[tb_name],query_param)\n\treturn", "def test_downvote_modifies_comment_score(self):\n comment = Comment.objects.get(body=\"987XYZ\")\n self.assertEqual(comment.score, DEFAULT_SCORE)\n vote = Vote.create(comment=comment, value=-1, voter=self.user)\n comment = Comment.objects.get(body=\"987XYZ\")\n self.assertEqual(comment.score, DEFAULT_SCORE - 1)", "def create_vote(self, data, header):\n return self.client.post(\n path='/api/v2/votes/', data=json.dumps(data), content_type='application/json', headers=header)", "def upgrade():\n op.add_column(\n 'wish', sa.Column(\n 'admin_votes_num', sa.Integer(), server_default='0',\n nullable=True))\n 
op.add_column(\n 'wish', sa.Column('real_votes_num', sa.Integer(), nullable=True))\n op.execute('UPDATE wish SET real_votes_num = votes_num')\n op.drop_column('wish', 'votes_num')", "def process_vote(self, comment_id, username, value):\n raise NotImplementedError()", "def save_query(self):\r\n self.conn.commit()", "def test_missing_vote_value(self) -> None:\n self.clear_votes()\n try:\n message = \"successfully voted\"\n QuestionVote.objects.create(\n question=self.question,\n user=self.user,\n )\n except django.db.IntegrityError:\n message = 'Error occured during creation of vote'\n finally:\n self.assertEqual(\n 'Error occured during creation of vote', message)", "def record(self):\n # TODO: record the data", "def commit_record( self, update_photo_state=True ):\n\n # update the record based on what's currently visible if requested.\n if update_photo_state:\n self.record[\"modified_time\"] = time.mktime( time.gmtime() )\n self.record[\"state\"] = self.photoProcessingStateComboBox.currentText()\n self.record[\"tags\"] = list( map( lambda x: x.strip(),\n self.photoTagsLineEdit.text().split( \", \" ) ) )\n\n self.db.mark_data_dirty()\n\n super().commit_record()", "def addNote(self, note):\n logger.debug(\"Func: addNote\")\n\n if not self._currentBaseSceneName:\n logger.warning(\"No Base Scene file selected\")\n return\n if self._currentVersionIndex == -1:\n logger.warning(\"No Version selected\")\n return\n now = datetime.datetime.now().strftime(\"%d/%m/%Y-%H:%M\")\n self._currentNotes = \"%s\\n[%s] on %s\\n%s\\n\" % (self._currentNotes, self.currentUser, now, note)\n self._currentSceneInfo[\"Versions\"][self._currentVersionIndex-1][\"Note\"] = self._currentNotes\n self._dumpJson(self._currentSceneInfo, self._baseScenesInCategory[self._currentBaseSceneName])", "def note():", "def put_note(note):\n if 'id' not in note:\n note['id'] = new_note_id()\n if 'ctime' not in note:\n note['ctime'] = util.utcnow_as_str()\n db.getdb().note.update({'id': note['id']}, note, upsert=True)\n return note", "def trackNewPayment(violation):\n data = db.Database(filename=\"IFB299.db\")\n payment = dict(Citation_Number = violation[\"Citation_Number\"], Citation_Type = violation[\"Violation_Type\"], Payment_status = \"Pending\")\n data.insertFinePayment(payment)\n data.close()", "def record_gauge(self, name, value, tags=None):\n identity = self.create_identity(name, tags)\n with self._lock:\n self._batch[identity] = value\n self._timestamps[identity] = int(time.time() * 1000.0)", "def mark():\n with CONNECTION:\n CURSOR.execute('INSERT INTO marks (mark) VALUES (?)', str(1))\n return '1'", "def toggle_vote(self):\n\n self.vote = 1 - self.vote", "def add_note(self, note):\n now = timezone.now()\n self.logger.info(\n \"(%s)(%s)(%s) - %s\" % (now, self.task_type, self.task.uuid, note)\n )\n note = \"%s - (%s)\" % (note, now)\n self.task.add_task_note(note)", "def commit(self):", "def save_article(title,image,description,content,pub_date,news_url,note,user):\n article = Article(\n title=title,\n image=image,\n description=description,\n content = content,\n pub_date=datetime.strptime(pub_date, \"%Y-%m-%dT%H:%M:%SZ\"),\n news_url=news_url\n )\n \n db.session.add(article)\n \n #testing\n print(article)\n \n # 2.0 add rating and notes during save event\n #creating the relationship between user and the saved article\n saved_article = Saved(\n user=user,\n article=article,\n notes=note\n )\n # notes=notes,\n # rating=rating)\n \n db.session.add(saved_article)\n db.session.commit()\n\n #testing\n print(saved_article)", "def 
store(self, quantitiy):\n self._stored = self._stored + quantitiy", "def dump_vote_tx(start, end):\n\tvote = {}\n\tvote['votes'] = []\n\n\twidgets = [Percentage(), # Setting how we wan the progress bar to look\n\t\t\t ' ', Bar(),\n\t\t\t ' ', ETA()]\n\n\tinput_range = end - start\n\tscrape_range_ref = input_range + 1\n\tpbar = ProgressBar(widgets=widgets, maxval=scrape_range_ref).start() #Prepare the progress bar\n\tprogress_iterator = 0\n\n\tfor x in range(start, end):\n\t\tpbar.update(progress_iterator + 1) # Display incremented progress\n\t\tprogress_iterator += 1 # Iterate the progress bar for next iteration\n\n\t\tobject_id = \"1.11.\" + str(x)\n\t\ttry:\n\t\t\tretrieved_object = bitshares_full_node.rpc.get_objects([object_id])[0]\n\t\texcept:\n\t\t\tcontinue\n\n\t\tif retrieved_object is not None:\n\t\t\tif 'new_options' in retrieved_object.keys():\n\t\t\t\t# Log details!\n\t\t\t\tprint(\"Vote!\")\n\t\t\t\tvote['votes'].append(retrieved_object)\n\t\t\t#else:\n\t\t\t\t#print(\"Not a vote!\")\n\treturn vote", "def save(self, node):\n if node:\n nextId = node.nref.nodeId if node.nref else None\n record = dict(nextId=nextId, childId=node.childId, label=node.label)\n if not node.nodeId:\n node.nodeId = self.db.insert(self.tableName, record)\n self.save(node.pref)\n else:\n self.db.update(node.nodeId, self.tableName, record)", "def vote(self, data, suffix=''): # pylint: disable=unused-argument\n # Here is where we would prevent a student from voting twice, but then\n # we couldn't click more than once in the demo!\n #\n # if self.voted:\n # log.error(\"cheater!\")\n # return\n\n votes = json.load(self.fs.open(u\"thumbsvotes.json\"))\n self.upvotes = votes['up']\n self.downvotes = votes['down']\n\n if data['voteType'] not in ('up', 'down'):\n log.error('error!')\n return\n\n if data['voteType'] == 'up':\n self.upvotes += 1\n else:\n self.downvotes += 1\n\n with self.fs.open(u'thumbsvotes.json', 'wb') as file_output:\n file_output.write(\n json.dumps({'up': self.upvotes, 'down': self.downvotes}).encode()\n )\n\n self.voted = True\n\n return {'up': self.upvotes, 'down': self.downvotes}", "def update_vote(self, vote):\n enemy = Enemy(vote.target, history={}).update_hostility(hostility=4, message=vote)\n self.update_enemy(enemy)", "def up_vote(cls, user, message):\r\n pass", "def run(self):\n self.db.table('points').insert({\n 'name': 'biblioteca',\n 'rfid': '123456'\n })", "def cmd_comment_vote(client, args):\n comment_vote = client.comment_vote(args.comment_id, args.vote)\n generate_output({'comment_vote': comment_vote})", "def dbWrite(dbPoint, formatedValue):\n raise NotImplementedError('dbWrite in simu mode')", "def test_update_note(self):\n pass", "def save_event(self, data):\n rdb.table(self.rdb_table).insert(data)", "def commitToDatabase(self, tiltseriesdata):\n\t\treturn", "def add_note():\n pass", "def add_note (self,\r\n index,\r\n keyset=None,\r\n text=None,\r\n metadata=None,\r\n note=None,\r\n keyset_only=None,\r\n meta_only=None,\r\n text_only=None):\r\n\r\n # USING SHELF\r\n\r\n if note:\r\n if self.using_shelf:\r\n self.note_dict[str(index)] = note\r\n text = note.text\r\n keyset = note.keyset\r\n metadata = note.meta\r\n\r\n\r\n elif keyset_only:\r\n if self.using_shelf:\r\n self.note_dict[str(index)].keyset = keyset_only\r\n keyset = keyset_only\r\n elif text_only:\r\n if self.using_shelf:\r\n self.note_dict[str(index)].text = text_only\r\n text = text_only\r\n elif meta_only:\r\n if self.using_shelf:\r\n self.note_dict[str(index)].meta = meta_only\r\n\r\n metadata = 
meta_only\r\n\r\n else:\r\n if self.using_shelf:\r\n self.note_dict[str(index)] = Note(keyset,\r\n text,\r\n metadata)\r\n if not text:\r\n text = ''\r\n if not keyset:\r\n keyset = set()\r\n if not metadata:\r\n metadata = {'size':self.defaults.get('size'),\r\n 'date':[str(datetime.datetime.now())],\r\n 'user':self.defaults.get('user')}\r\n\r\n # USING DATABASE\r\n if self.using_database:\r\n aprint('ADDING NOTE')\r\n\r\n text = text.replace(\"'\",\"''\")\r\n db_cursor.execute(\"SELECT * FROM notes\")\r\n\r\n\r\n value_tuple = (notebookname,str(index),text,metadata['size'],metadata['user'])\r\n db_cursor.execute(\"INSERT OR REPLACE\"\r\n +\" INTO notes\"\r\n +\" (notebook, note_index, note_body, size, user)\"\r\n +\" VALUES (?,?,?,?,?);\",\r\n value_tuple)\r\n if not isinstance(metadata['date'],list):\r\n metadata['date'] = [metadata['date']]\r\n metadata['date'] = [str(d) for d in metadata['date']]\r\n\r\n for d_temp in metadata['date']:\r\n\r\n value_tuple = (notebookname, str(index), d_temp,)\r\n db_cursor.execute(\"INSERT OR REPLACE\"\r\n +\" INTO timestamps\"\r\n +\" (notebook, note_index, timestamp)\"\r\n +\" VALUES (?,?,?);\",\r\n value_tuple)\r\n\r\n\r\n for k_temp in keyset:\r\n value_tuple = (notebookname, str(index), k_temp,)\r\n db_cursor.execute(\"INSERT OR REPLACE \"\r\n +\"INTO all_note_keys \"\r\n +\"(notebook, note_index, keyword)\"\r\n +\" VALUES (?,?,?);\",\r\n value_tuple)", "def edit_current_note():\n note_id = request.form.get(\"note_id\")\n\n edited_note = Note.query.get(note_id)\n\n edited_note.title_note = request.form.get(\"title\")\n edited_note.note = request.form.get(\"note\")\n\n\n db.session.commit()\n \n return \"note edited\"", "def test_add_write_survey_node_after_mv(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.write_survey=true\", \"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n for i in range(1000, 1100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n for i in range(1100):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])", "def save(self):\n db.session.commit()", "def save(self):\n self.session.commit()", "def save(self):\n value = self.volume()\n session = self.resource.conn.session\n # self.artifice.\n try:\n tenant_id = self.resource[\"tenant_id\"]\n except KeyError:\n tenant_id = self.resource[\"project_id\"]\n resource_id = self.resource[\"resource_id\"]\n\n tenant = session.query(tenants.Tenant).get(tenant_id)\n\n if tenant is None:\n res = resources.Resource()\n tenant = tenants.Tenant()\n tenant.id = tenant_id\n\n res.id = resource_id\n res.tenant = tenant\n session.add(res)\n session.add(tenant)\n else:\n try:\n res = session.query(resources.Resource).filter(resources.Resource.id == resource_id)[0]\n tenant = res.tenant\n except IndexError:\n res = resources.Resource()\n tenant = tenants.Tenant()\n tenant.id = tenant_id\n res.id = resource_id\n res.tenant = tenant\n session.add(res)\n session.add(tenant)\n\n 
this_usage = usage.Usage(\n res,\n tenant,\n value,\n self.start,\n self.end,\n )\n session.add(this_usage)\n session.commit() # Persist to Postgres", "def commit(self, snapshot):\n _, checkpoint_id = snapshot\n self.chaindb.commit(checkpoint_id)", "def save_data(self, record):\n self.dbm.addRecord(record)", "def test_multiple_vote(self) -> None:\n try:\n message = \"successfully voted\"\n QuestionVote.objects.create(\n vote=1,\n question=self.question,\n user=self.user,\n )\n except django.db.IntegrityError:\n message = 'Error occured during creation of vote'\n finally:\n self.assertEqual(\n 'Error occured during creation of vote', message)", "def tally_vote(self, data):\r\n if self.user_voted:\r\n return {'error': 'Sorry, but you have already voted!'}\r\n ans = data['answer']\r\n if not self.validate_answer(ans):\r\n # Uh oh. Invalid answer.\r\n log.exception('Failure in hinter tally_vote: Unable to parse answer: {ans}'.format(ans=ans))\r\n return {'error': 'Failure in voting!'}\r\n hint_pk = str(data['hint'])\r\n # We use temp_dict because we need to do a direct write for the database to update.\r\n temp_dict = self.hints\r\n try:\r\n temp_dict[ans][hint_pk][1] += 1\r\n except KeyError:\r\n log.exception('''Failure in hinter tally_vote: User voted for non-existant hint:\r\n Answer={ans} pk={hint_pk}'''.format(ans=ans, hint_pk=hint_pk))\r\n return {'error': 'Failure in voting!'}\r\n self.hints = temp_dict\r\n # Don't let the user vote again!\r\n self.user_voted = True\r\n\r\n # Return a list of how many votes each hint got.\r\n pk_list = json.loads(data['pk_list'])\r\n hint_and_votes = []\r\n for answer, vote_pk in pk_list:\r\n if not self.validate_answer(answer):\r\n log.exception('In hinter tally_vote, couldn\\'t parse {ans}'.format(ans=answer))\r\n continue\r\n try:\r\n hint_and_votes.append(temp_dict[answer][str(vote_pk)])\r\n except KeyError:\r\n log.exception('In hinter tally_vote, couldn\\'t find: {ans}, {vote_pk}'.format(\r\n ans=answer, vote_pk=str(vote_pk)))\r\n\r\n hint_and_votes.sort(key=lambda pair: pair[1], reverse=True)\r\n # Reset self.previous_answers and user_submissions.\r\n self.previous_answers = []\r\n self.user_submissions = []\r\n return {'hint_and_votes': hint_and_votes}", "def send_note(line, dt):\n # example from log.txt:\n # [2021-04-20 14:55:02.085] miner speed 10s/60s/15m 77.37 76.91 n/a H/s max 77.87 H/s\n hash_rate = line[54:line.find(\" \", 54) - 1]\n ms_time = dt.timestamp() * 1000\n\n req = {\"req\": \"note.add\"}\n req[\"file\"] = \"crypto.qo\"\n req[\"body\"] = {\"rate\": hash_rate, \"time\": ms_time}\n req[\"sync\"] = True\n rsp = card.Transaction(req)\n # print(rsp) # debug/print request", "def save(self):\n self.__db.commit()", "def _save (self, expires):\n\n pickled_data = pickle.dumps (self._data, self.pickle_protocol)\n\n self._delete ()\n self._exec (\n \"\"\"\\\n insert into table_name (id, expires, data)\n values (%(id)s, %(expires)s, %(data)s)\n \"\"\",\n data = pickled_data,\n expires = expires\n )", "def dbcommit(self):\n self.con.commit()", "def write(self):\n if not self._table: raise ValueError ( \"_table is Null\" )\n if self._isnew:\n for m in self._modified_values:\n self._modified_values[m] = self._table[m].val_py2sql(self._modified_values[m])\n \n try:\n rec = CFG.CX.insert ( CFG.DB.SCHEMA + \".\" + self._table.name,\n self._modified_values )\n #this will automatically re-read the data from the db, to take all changes\n #done by triggers and default values into account.\n self._objectid = rec['objectid']\n\n #print \"Record # {0} 
inserted into {1}.\".format(self._objectid, self._table.name)\n self.raiseEvent ( \"record_added\", self )\n \n except pg.DatabaseError, e:\n print \"Error inserting record.\"\n raise Record.DataManipulationError ( \"Inserting a new record into '{0}'\".format(self._table.name),\n str(self._modified_values),\n e)\n elif self._ismodified:\n \n for m in self._modified_values: \n self._modified_values[m] = self._table[m].val_py2sql(self._modified_values[m])\n \n self._modified_values['objectid'] = self._objectid\n del self.TextCache[self._objectid]\n try:\n rec = CFG.CX.update ( CFG.DB.SCHEMA + \".\" + self._table.name,\n self._modified_values )\n self.read() \n self.raiseEvent ( \"record_saved\", self )\n except pg.DatabaseError, e:\n print \"Error updating record\"\n raise Record.DataManipulationError ( \"Updating record {1} of '{0}'\".format(self._table.name, self._objectid),\n str(self._modified_values),\n e)", "def upsert_individual_vote(self, vote_tuple):\n\n return self.execute(\"\"\"\n INSERT INTO votes (vote_id, person_bioguide_id, vote)\n VALUES (%s, %s, %s)\n\n ON CONFLICT (vote_id, person_bioguide_id) DO UPDATE\n SET (vote) = (EXCLUDED.vote)\n WHERE votes.vote_id = EXCLUDED.vote_id\n AND votes.person_bioguide_id = EXCLUDED.person_bioguide_id\n \"\"\", vote_tuple)", "async def update_votes(self, id, up, down):\n # update stickers set\n # data =\n # jsonb_set(\n # jsonb_set(\n # data,\n # '{5}',\n # (select (((data->>5)::int+1)::text)::jsonb from stickers\n # where data->>0 = '61015')\n # ),\n # '{6}',\n # (select (((data->>6)::int+1)::text)::jsonb from stickers\n # where data->>0 = '61015')\n # )\n # where data->>0 = '61015';\n result = await self.db.fetchval((\n 'select data->>5 from stickers where data->>0 = \\'{0}\\''\n ).format(id)\n )\n if result:\n sql = (\"update stickers set\"\n \" data =\"\n \" jsonb_set(\"\n \" jsonb_set(\"\n \" data,\"\n \" '{{4}}',\"\n \" (select \"\n \" (((data->>4)::int+{1})::text)::jsonb \"\n \" from stickers where data->>0 = '{0}')\"\n \" ),\"\n \" '{{5}}',\"\n \" (select \"\n \" (((data->>5)::int+{2})::text)::jsonb \"\n \" from stickers where data->>0 = '{0}')\"\n \" )\"\n \" where data->>0 = '{0}';\")\n sql = sql.format(id, up, down)\n else:\n sql = (\"update stickers set \"\n \" data = data || '[1,1]'\"\n \" where data->>0 = '{0}';\").format(id)\n await self.db.execute(sql)", "def _commit_now(self):\n self._database.commit()", "def save(self):\n self.db.commit()", "def record_count(self, name, value, tags=None):\n identity = self.create_identity(name, tags, \"count\")\n with self._lock:\n self._batch[identity] = self._batch.get(identity, 0) + value", "def updateNote(self, authenticationToken, note):\r\n pass", "def save(self, computed = True):\n\t\tself.cart.cart_history_set.create(price = self.price, computed = computed)", "def insert_data(instance_id, database_id):\n spanner_client = spanner.Client()\n instance = spanner_client.instance(instance_id)\n database = instance.database(database_id)\n\n with database.batch() as batch:\n batch.insert(\n table='achieve',\n columns=('user_id', 'is_buy_completed', 'is_fta_completed','is_liked_completed', 'is_list_completed', 'is_profile_completed','is_registeration_completed', 'is_sold_completed', 'is_ss_completed',),\n values=[\n (1, 1, 1, 1, 1, 1, 1, 1, 1),\n (2, 1, 1, 1, 1, 0, 0, 0, 0),\n (3, 1, 0, 1, 0, 1, 0, 1, 0),\n (4, 0, 1, 0, 1, 0, 1, 0, 1),\n (5, 0, 0, 0, 0, 0, 0, 0, 0)])\n\n print('Inserted data.')", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def 
save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def _db(self, value):", "def _write_to_db(self, instance: DBModelInstance) -> None:\n self.db.session.add(instance)\n self.db.session.commit()" ]
[ "0.65922546", "0.65765816", "0.6408348", "0.631297", "0.62766236", "0.61878365", "0.5687511", "0.5568119", "0.55055785", "0.54527164", "0.5442264", "0.5370488", "0.53575575", "0.5343209", "0.5315146", "0.5259986", "0.52259654", "0.5187528", "0.5185984", "0.5178056", "0.51677734", "0.5145494", "0.51358426", "0.51201797", "0.5082618", "0.50788957", "0.5069621", "0.50370055", "0.50024027", "0.49928632", "0.49763533", "0.49700895", "0.4943927", "0.48957795", "0.48850504", "0.48815477", "0.48584625", "0.48541105", "0.484603", "0.48436832", "0.48165888", "0.48139614", "0.4812221", "0.4808258", "0.48015487", "0.47929394", "0.4790385", "0.47849104", "0.47777936", "0.477198", "0.47709477", "0.47649035", "0.47604144", "0.47528195", "0.47526407", "0.47524154", "0.47493026", "0.47490698", "0.47373316", "0.4730068", "0.472842", "0.47269264", "0.47131875", "0.4708799", "0.47077882", "0.4705311", "0.47026226", "0.47023293", "0.46979254", "0.46937928", "0.46802413", "0.46794713", "0.4675484", "0.46749613", "0.4673586", "0.46734866", "0.46645254", "0.4663226", "0.46582472", "0.4647164", "0.46310762", "0.46271965", "0.46187174", "0.46154594", "0.461294", "0.46127388", "0.46119747", "0.46110442", "0.46044025", "0.4603734", "0.4599382", "0.4599382", "0.4599382", "0.4599382", "0.4599382", "0.4599382", "0.4599382", "0.4599382", "0.45980474", "0.4597571" ]
0.64803696
2
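The record ending here pairs a docstring about committing manually (the view must read the current rating, compute a new value, and write it back, which needs a transactional table such as InnoDB rather than MyISAM on MySQL) with an old-style Django view built on transaction.rollback()/transaction.commit(). As a hedged illustration only — not part of the dataset, and assuming the Rating and RatingEvent models from the quoted view are importable — the same read-compute-write step can be written with Django's current transaction.atomic() plus select_for_update(), which locks the row for the duration of the update:

from django.db import transaction
from django.http import HttpResponse

def record_vote_atomic(request):
    # Sketch of the quoted view using atomic() instead of manual commit/rollback.
    try:
        with transaction.atomic():  # commits on success, rolls back on any exception
            rating, _ = Rating.objects.select_for_update().get_or_create(
                key=request.POST["id"]
            )
            event, created = RatingEvent.objects.get_or_create(
                key=rating.key, ip=request.META["REMOTE_ADDR"]
            )
            if not created:
                event.is_changing = True
                event.old_value = event.value
            event.value = int(request.POST["vote"])
            rating.add_rating(event)
            rating.save()
            event.save()
    except (KeyError, ValueError):
        return HttpResponse("error")
    return HttpResponse(f"{rating.avg_rating}/5 rating ({rating.total_votes} votes)")

The caveat from the docstring still applies: select_for_update() and atomic() only give real guarantees when the storage engine supports row locks and transactions, i.e. InnoDB rather than MyISAM on MySQL.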
This is an ADMM solver for the (Latent variable) Single Graphical Lasso problem (SGL). If ``latent=False``, this function solves
def ADMM_SGL(S, lambda1, Omega_0, Theta_0=np.array([]), X_0=np.array([]), rho=1., max_iter=1000, tol=1e-7, rtol=1e-4, stopping_criterion='boyd',\ update_rho=True, verbose=False, measure=False, latent=False, mu1=None): assert Omega_0.shape == S.shape assert S.shape[0] == S.shape[1] assert lambda1 > 0 assert stopping_criterion in ["boyd", "kkt"] if latent: assert mu1 is not None assert mu1 > 0 (p, p) = S.shape assert rho > 0, "ADMM penalization parameter must be positive." # initialize Omega_t = Omega_0.copy() if len(Theta_0) == 0: Theta_0 = Omega_0.copy() if len(X_0) == 0: X_0 = np.zeros((p, p)) Theta_t = Theta_0.copy() L_t = np.zeros((p, p)) X_t = X_0.copy() runtime = np.zeros(max_iter) residual = np.zeros(max_iter) status = '' if verbose: print("------------ADMM Algorithm for Single Graphical Lasso----------------") if stopping_criterion == 'boyd': hdr_fmt = "%4s\t%10s\t%10s\t%10s\t%10s" out_fmt = "%4d\t%10.4g\t%10.4g\t%10.4g\t%10.4g" print(hdr_fmt % ("iter", "r_t", "s_t", "eps_pri", "eps_dual")) elif stopping_criterion == 'kkt': hdr_fmt = "%4s\t%10s" out_fmt = "%4d\t%10.4g" print(hdr_fmt % ("iter", "kkt residual")) ################################################################## ### MAIN LOOP STARTS ################################################################## for iter_t in np.arange(max_iter): if measure: start = time.time() # Omega Update W_t = Theta_t - L_t - X_t - (1 / rho) * S eigD, eigQ = np.linalg.eigh(W_t) Omega_t_1 = Omega_t.copy() Omega_t = phiplus(beta=1 / rho, D=eigD, Q=eigQ) # Theta Update Theta_t = prox_od_1norm(Omega_t + L_t + X_t, (1 / rho) * lambda1) # L Update if latent: C_t = Theta_t - X_t - Omega_t # C_t = (C_t.T + C_t)/2 eigD1, eigQ1 = np.linalg.eigh(C_t) L_t = prox_rank_norm(C_t, mu1/rho, D=eigD1, Q=eigQ1) # X Update X_t = X_t + Omega_t - Theta_t + L_t if measure: end = time.time() runtime[iter_t] = end - start # Stopping criterion if stopping_criterion == 'boyd': r_t,s_t,e_pri,e_dual = ADMM_stopping_criterion(Omega_t, Omega_t_1, Theta_t, L_t, X_t,\ S, rho, tol, rtol, latent) # update rho if update_rho: if r_t >= 10*s_t: rho_new = 2*rho elif s_t >= 10*r_t: rho_new = 0.5*rho else: rho_new = 1.*rho # rescale dual variables X_t = (rho/rho_new)*X_t rho = rho_new residual[iter_t] = max(r_t,s_t) if verbose: print(out_fmt % (iter_t,r_t,s_t,e_pri,e_dual)) if (r_t <= e_pri) and (s_t <= e_dual): status = 'optimal' break elif stopping_criterion == 'kkt': eta_A = kkt_stopping_criterion(Omega_t, Theta_t, L_t, rho * X_t, S, lambda1, latent, mu1) residual[iter_t] = eta_A if verbose: print(out_fmt % (iter_t,eta_A)) if eta_A <= tol: status = 'optimal' break ################################################################## ### MAIN LOOP FINISHED ################################################################## # retrieve status (partially optimal or max iter) if status != 'optimal': if stopping_criterion == 'boyd': if (r_t <= e_pri): status = 'primal optimal' elif (s_t <= e_dual): status = 'dual optimal' else: status = 'max iterations reached' else: status = 'max iterations reached' print(f"ADMM terminated after {iter_t+1} iterations with status: {status}.") ### CHECK FOR SYMMETRY if abs((Omega_t).T - Omega_t).max() > 1e-5: warnings.warn(f"Omega variable is not symmetric, largest deviation is {abs((Omega_t).T - Omega_t).max()}.") if abs((Theta_t).T - Theta_t).max() > 1e-5: warnings.warn(f"Theta variable is not symmetric, largest deviation is {abs((Theta_t).T - Theta_t).max()}.") if abs((L_t).T - L_t).max() > 1e-5: warnings.warn(f"L variable is not symmetric, largest 
deviation is {abs((L_t).T - L_t).max()}.") ### CHECK FOR POSDEF D = np.linalg.eigvalsh(Theta_t - L_t) if D.min() <= 0: print( f"WARNING: Theta (Theta - L resp.) is not positive definite. Solve to higher accuracy! (min EV is {D.min()})") if latent: D = np.linalg.eigvalsh(L_t) if D.min() < -1e-8: print(f"WARNING: L is not positive semidefinite. Solve to higher accuracy! (min EV is {D.min()})") if latent: sol = {'Omega': Omega_t, 'Theta': Theta_t, 'L': L_t, 'X': X_t} else: sol = {'Omega': Omega_t, 'Theta': Theta_t, 'X': X_t} if measure: info = {'status': status, 'runtime': runtime[:iter_t+1], 'residual': residual[:iter_t+1]} else: info = {'status': status} return sol, info
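A short, hedged usage sketch for the ADMM_SGL solver quoted above (it assumes the function and the helpers it calls — phiplus, prox_od_1norm, prox_rank_norm and the stopping-criterion routines — are importable from the same module; the data is synthetic and purely illustrative):

import numpy as np

rng = np.random.default_rng(0)
p, n = 20, 200
X = rng.standard_normal((n, p))
S = np.cov(X, rowvar=False)                 # empirical covariance matrix

# Plain graphical lasso: sol['Theta'] is the sparse precision estimate.
sol, info = ADMM_SGL(S, lambda1=0.1, Omega_0=np.eye(p), verbose=False)
Theta = sol["Theta"]

# Latent-variable variant: Theta - L is the precision matrix, L is low rank.
sol_lv, _ = ADMM_SGL(S, lambda1=0.1, Omega_0=np.eye(p), latent=True, mu1=1.0)
L_lowrank = sol_lv["L"]

Both calls return (sol, info), where info carries the termination status and, with measure=True, per-iteration runtimes and residuals, exactly as assembled at the end of the function.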
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solver_mll(X, y, C, S, alpha=0.1, max_iter=1000, tol=1e-4, positive=False):\n n_tasks, n_samples, n_features = X.shape\n lasso = Lasso(alpha=alpha, fit_intercept=False, positive=positive)\n lasso_p = Lasso(alpha=alpha / n_tasks, fit_intercept=False,\n positive=True)\n old_theta = C[:, None] * S\n\n for i in range(max_iter):\n W = X * C[None, None, :]\n for k in range(n_tasks):\n lasso.fit(W[k], y[k])\n S[:, k] = lasso.coef_\n Z = S.T[:, None, :] * X\n Z = Z.reshape(n_tasks * n_samples, n_features)\n lasso_p.fit(Z, y.flatten())\n C = lasso_p.coef_\n theta = C[:, None] * S\n dll = abs(theta - old_theta).max()\n dll /= max(abs(theta).max(), abs(old_theta).max(), 1.)\n old_theta = theta.copy()\n\n if dll < tol:\n break\n\n if i == max_iter - 1:\n warnings.warn('Objective did not converge.' +\n ' You might want' +\n ' to increase the number of iterations.' +\n ' Fitting data with very small alpha' +\n ' may cause precision problems.',\n ConvergenceWarning)\n return C, S, i", "def LinearModel(G,x=0,i0=0.1,L1='L',D=-0.01,tf=5,Nt=1000):\r\n #set up graph atteributes\r\n N = G.number_of_nodes()\r\n degree_arr=np.asarray(G.degree(),dtype=int)[:,1]\r\n iarray = np.zeros((Nt+1,N))\r\n tarray = np.linspace(0,tf,Nt+1)\r\n #calucalte operaters and set intial conditions\r\n A=nx.adjacency_matrix(G)\r\n L=-D*(scipy.sparse.diags(degree_arr)-A)\r\n Ls=-D*(scipy.sparse.diags(np.ones(N))-scipy.sparse.diags(1/degree_arr).dot(A))\r\n\r\n y0=np.zeros(N)\r\n y0[x]=i0\r\n #set up operators\r\n\r\n if L1=='Ls':\r\n L=Ls\r\n elif L1=='Lst':\r\n L=Ls.transpose()\r\n\r\n #define operators\r\n def Lap(y,t):\r\n return scipy.sparse.csr_matrix.__mul__(L,y)\r\n\r\n iarray[:,:]=scipy.integrate.odeint(Lap,y0,tarray)\r\n\r\n return iarray", "def __solve_full_linear_problem(self):\n samples = []\n\n for news in self.news_pool:\n samples += [news.sampled_quality] * self.layout_slots\n\n self.full_C = np.array(samples) * self.full_lambdas\n\n linear_problem = opt.linprog(A_ub=self.full_A, b_ub=self.full_B, c=self.full_C)\n slots_assegnation_probabilities = []\n slot_counter = 0\n tmp_slot_probabilities = []\n while slot_counter < self.layout_slots:\n i = slot_counter\n while i < len(linear_problem.x):\n tmp_slot_probabilities.append(np.abs(linear_problem.x[i]))\n i += self.layout_slots\n slots_assegnation_probabilities.append(tmp_slot_probabilities.copy())\n tmp_slot_probabilities.clear()\n slot_counter += 1\n\n result = self.__de_randomize_LP(self.news_pool, slots_assegnation_probabilities, self.lp_rand_tech)\n\n return result", "def fit(self):\n import networkx as nx\n import torch\n # Step 1. Calculate the Laplacian matrix\n L = nx.laplacian_matrix(self.Graph)\n nodelist = self.Graph.nodes()\n K = L.shape[0]\n\n # Step 2. Get the data in the right format \n cache = self.loss_function(self.data_train)\n \n # Step 3. Compute the proximal loss\n def proximal_loss(t, nu, warm_start, pool, cache=cache):\n XtX = cache['XtX']\n XtY = cache['XtY']\n n = cache['n']\n # LU = X'X + 0.5 * t * I\n Alu = torch.lu(XtX + 1./(2 * t) * torch.eye(n).unsqueeze(0).double())\n b = XtY + 1./(2 * t) * torch.from_numpy(nu)\n x = torch.lu_solve(b, *Alu).numpy()\n return x\n\n def proximal_residual(t, nu, warm_start, pool, lambda_val=1e-4):\n return nu / (1. 
+ t * lambda_val)\n\n G_to_data = self._graph_to_data(cache['alpha_shape'])\n result, info = self._stratified_model_admm(shape=cache['shape'], \\\n Lap=L, \\\n loss_proximal_func=proximal_loss, \\\n regulariser_proximal_func=proximal_residual, \\\n graph_data=G_to_data)\n print(info)\n return self._output_to_graph(result)", "def latent_grad(mu_S, kern, Z, dL_dpsi0, dL_dpsi1, dL_dpsi2):\r\n mu, log_S = mu_S.reshape(2, 1, -1)\r\n S = np.exp(log_S)\r\n\r\n mu0, S0 = kern.dpsi0_dmuS(dL_dpsi0, Z, mu, S)\r\n mu1, S1 = kern.dpsi1_dmuS(dL_dpsi1, Z, mu, S)\r\n mu2, S2 = kern.dpsi2_dmuS(dL_dpsi2, Z, mu, S)\r\n\r\n dmu = mu0 + mu1 + mu2 - mu\r\n # dS = S0 + S1 + S2 -0.5 + .5/S\r\n dlnS = S * (S0 + S1 + S2 - 0.5) + .5\r\n\r\n return -np.hstack((dmu.flatten(), dlnS.flatten()))", "def block_SGL(S, lambda1, Omega_0, Theta_0=None, X_0=None, rho=1., max_iter=1000, \n tol=1e-7, rtol=1e-3, stopping_criterion=\"boyd\",\n update_rho=True, verbose=False, measure=False):\n assert Omega_0.shape == S.shape\n assert S.shape[0] == S.shape[1]\n assert lambda1 > 0\n\n (p, p) = S.shape\n\n if Theta_0 is None:\n Theta_0 = Omega_0.copy()\n if X_0 is None:\n X_0 = np.zeros((p, p))\n\n # compute connected components of S with lambda_1 threshold\n numC, allC = get_connected_components(S, lambda1)\n\n allOmega = list()\n allTheta = list()\n allX = list()\n\n for i in range(numC):\n C = allC[i]\n\n # single node connected components have a closed form solution, see Witten, Friedman, Simon \"NEW INSIGHTS FOR THE GRAPHICAL LASSO \"\n if len(C) == 1:\n # we use the OFF-DIAGONAL l1-penalty, otherwise it would be 1/(S[C,C]+lambda1)\n closed_sol = 1 / (S[C, C])\n\n allOmega.append(closed_sol)\n allTheta.append(closed_sol)\n allX.append(np.array([0]))\n\n\n # else solve Graphical Lasso for the corresponding block\n else:\n block_S = S[np.ix_(C, C)]\n block_sol, block_info = ADMM_SGL(S=block_S, lambda1=lambda1, Omega_0=Omega_0[np.ix_(C, C)],\n Theta_0=Theta_0[np.ix_(C, C)], X_0=X_0[np.ix_(C, C)], tol=tol, rtol=rtol,\n stopping_criterion=stopping_criterion, update_rho=update_rho,\n rho=rho, max_iter=max_iter, verbose=verbose, measure=measure)\n\n allOmega.append(block_sol['Omega'])\n allTheta.append(block_sol['Theta'])\n allX.append(block_sol['X'])\n\n # compute inverse permutation\n per = np.hstack(allC)\n per1 = invert_permutation(per)\n\n # construct solution by applying inverse permutation indexing\n sol = dict()\n sol['Omega'] = block_diag(*allOmega)[np.ix_(per1, per1)]\n sol['Theta'] = block_diag(*allTheta)[np.ix_(per1, per1)]\n sol['X'] = block_diag(*allX)[np.ix_(per1, per1)]\n\n return sol", "def set_DirichletSS_sparse(self):\n \n \n self.set_Dirichlet_vessel(self.inlet)\n\n\n self.tissue_consumption(self.Mt)\n \n #REINITIALISATION OF THE VECTOR OF TISSUE PHI!!!\n self.phi_t=np.zeros(len(self.phit))\n \n self.set_Dirichlet_north(0)\n self.set_Dirichlet_east(0)\n self.set_Dirichlet_west(0)\n \n self.A.eliminate_zeros()", "def solver_mll(X, y, alpha=0.1, C=None, S=None, callback=None, positive=False,\n maxiter=1000, tol=1e-4, compute_obj=False):\n n_tasks, n_samples, n_features = X.shape\n lasso = Lasso(alpha=alpha, fit_intercept=False,\n positive=positive)\n lasso_p = Lasso(alpha=alpha / n_tasks, fit_intercept=False,\n positive=True)\n if S is None:\n S = np.zeros((n_features, n_tasks))\n if C is None:\n C = np.ones(n_features)\n else:\n if C.max() <= 0:\n C = np.ones(n_features)\n\n old_theta = C[:, None] * S\n objs = []\n if compute_obj or callback:\n ll = objective(X, y, C, S, alpha)\n objs.append(ll)\n for i in 
range(maxiter):\n # W = block_diag(X * C[None, None, :], \"csc\")\n # lasso.fit(W, y.flatten())\n # S = lasso.coef_.reshape(n_tasks, n_features).T\n W = X * C[None, None, :]\n for k in range(n_tasks):\n lasso.fit(W[k], y[k])\n S[:, k] = lasso.coef_\n Z = S.T[:, None, :] * X\n Z = Z.reshape(n_tasks * n_samples, n_features)\n lasso_p.fit(Z, y.flatten())\n C = lasso_p.coef_\n theta = C[:, None] * S\n dll = abs(theta - old_theta).max()\n dll /= max(theta.max(), old_theta.max(), 1.)\n old_theta = theta.copy()\n if compute_obj or callback:\n ll = objective(X, y, C, S, alpha)\n objs.append(ll)\n if callback:\n callback(theta, obj=ll)\n if dll < tol:\n break\n\n if i == maxiter - 1:\n print(\"**************************************\\n\"\n \"******** WARNING: Stopped early. *****\\n\"\n \"\\n\"\n \"You may want to increase maxiter. Last err: %f\" % dll)\n return C, S, objs", "def fit(self, adjacency: Union[sparse.csr_matrix, np.ndarray]) -> 'LaplacianEmbedding':\n adjacency = check_format(adjacency).asfptype()\n check_square(adjacency)\n check_symmetry(adjacency)\n n = adjacency.shape[0]\n\n regularize: bool = not (self.regularization is None or self.regularization == 0.)\n check_scaling(self.scaling, adjacency, regularize)\n\n if regularize:\n solver: EigSolver = LanczosEig()\n else:\n solver = set_solver(self.solver, adjacency)\n n_components = 1 + check_n_components(self.n_components, n-2)\n\n weights = adjacency.dot(np.ones(n))\n regularization = self.regularization\n if regularization:\n if self.relative_regularization:\n regularization = regularization * weights.sum() / n ** 2\n weights += regularization * n\n laplacian = LaplacianOperator(adjacency, regularization)\n else:\n weight_diag = sparse.diags(weights, format='csr')\n laplacian = weight_diag - adjacency\n\n solver.which = 'SM'\n solver.fit(matrix=laplacian, n_components=n_components)\n eigenvalues = solver.eigenvalues_[1:]\n eigenvectors = solver.eigenvectors_[:, 1:]\n\n embedding = eigenvectors.copy()\n\n if self.scaling:\n eigenvalues_inv_diag = diag_pinv(eigenvalues ** self.scaling)\n embedding = eigenvalues_inv_diag.dot(embedding.T).T\n\n if self.normalized:\n embedding = normalize(embedding, p=2)\n\n self.embedding_ = embedding\n self.eigenvalues_ = eigenvalues\n self.eigenvectors_ = eigenvectors\n self.regularization_ = regularization\n\n return self", "def solver(u_init, eta_0, eta, eta_lin, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, ftol = 1e-3, max_iter = 5000, verbose = 0, nnls_max_iter=30):\n\n # Raise('NotImplementedError: only adjusted the arguments.')\n #Need to incorporate L_lhs into stacked and appropriate w_lin updates, u_update and eta_lin increments\n #precompute the expensive operation:\n lin_penalties = 1/np.sqrt(2*eta_lin)\n eta_T_H_L_stacked = scipy.sparse.vstack([T.multiply(1/np.sqrt(2*eta_0))] + [H[i].multiply(1/np.sqrt(2*eta[i])) for i in range(len(H))] + [L_lhs.multiply(lin_penalties[:,None])])\n #!!!!\n# premultiplied_lhs = eta_T_H_stacked.T.dot(eta_T_H_stacked).toarray()\n #!!!!\n u_prev = u_init + 1\n u = u_init\n count = 0\n obj_history = []\n relaxed_obj_history = [-1, 0.1] #just two initial values to enter the loop\n while np.abs((relaxed_obj_history[-2] - relaxed_obj_history[-1])/relaxed_obj_history[-2]) > ftol and count < max_iter:#np.linalg.norm(u - u_prev, np.inf) > 1e-3 and count < max_iter: #Maybe all of them stop changing\n start = time.time()\n \n u_prev = np.copy(u)\n w_0 = w_0_update(eta_0, u, T, alpha, B) \n w = w_update(u, H, gamma, D, C) \n w_lin = w_lin_update(u, L_lhs, L_rhs)\n# u = 
u_update(eta_0, eta, w_0, w, eta_T_H_stacked, nnls_max_iter=50)\n #!!!!\n # u = u_update(eta_0, eta, w_0, w, eta_T_H_L_stacked, nnls_max_iter=30)\n u = u_update(eta_0, eta, eta_lin, w_0, w, w_lin, eta_T_H_L_stacked, premultiplied_lhs = None, nnls_max_iter=nnls_max_iter)\n #!!!!\n count += 1 \n if count == 10:\n u_inf = np.copy(u)\n w_0_inf = w_0[:]\n w_inf = w[:]\n w_lin_inf = w_lin[:]\n if count > 10 and np.abs(cur_obj) > 1e+15: #HANDLE THIS BETTER!!!\n print('INFINITY! RETURNING u at the 10-th iteration to enter the feasibility loop')\n return u_inf, w_0_inf, w_inf, w_lin_inf, obj_history, relaxed_obj_history\n \n cur_obj = obj_u_opt_N_fixed(u, T, alpha, B)\n obj_history.append(cur_obj)\n cur_relaxed_obj = relaxed_obj_u_opt_N_fixed(u, w_0, w, w_lin, eta_0, eta, eta_lin, T, H, L_lhs, alpha, B)\n # relaxed_obj_u_opt_N_fixed(u, w_0, w, eta_0, eta, T, H, alpha, B)\n relaxed_obj_history.append(cur_relaxed_obj) \n \n stop = time.time()\n duration = stop-start\n \n if count%1 == 0 and verbose: \n stopping_criterion = np.abs((relaxed_obj_history[-2] - relaxed_obj_history[-1])/relaxed_obj_history[-2])\n print(' iter = {}, stopping criterion:{}, OBJ {}'.format(count, stopping_criterion, cur_obj))\n print(' This iteration took: {}'.format(duration))\n return u, w_0, w, w_lin, obj_history, relaxed_obj_history", "def solveLSM(self):\n ierr = c_int(1)\n self.fteik2d.fteik_solver2d_solveLSM(ierr)\n if (ierr.value != 0):\n print(\"Error solving eikonal equation\")\n return -1\n return 0\n #errorAll = 0\n #for i in range(self.nsrc):\n # isrc = i + 1\n # self.fteik2d.fteik_solver2d_solveSourceLSM(isrc, ierr)\n # if (ierr.value != 0):\n # print(\"Failed to solve for source %d\"%i+1)\n # errorAll = errorAll + 1\n #return errorAll", "def latent_cost_and_grad(mu_S, kern, Z, dL_dpsi0, dL_dpsi1, dL_dpsi2):\r\n mu, log_S = mu_S.reshape(2, 1, -1)\r\n S = np.exp(log_S)\r\n\r\n psi0 = kern.psi0(Z, mu, S)\r\n psi1 = kern.psi1(Z, mu, S)\r\n psi2 = kern.psi2(Z, mu, S)\r\n\r\n lik = dL_dpsi0 * psi0 + np.dot(dL_dpsi1.flatten(), psi1.flatten()) + np.dot(dL_dpsi2.flatten(), psi2.flatten()) - 0.5 * np.sum(np.square(mu) + S) + 0.5 * np.sum(log_S)\r\n\r\n mu0, S0 = kern.dpsi0_dmuS(dL_dpsi0, Z, mu, S)\r\n mu1, S1 = kern.dpsi1_dmuS(dL_dpsi1, Z, mu, S)\r\n mu2, S2 = kern.dpsi2_dmuS(dL_dpsi2, Z, mu, S)\r\n\r\n dmu = mu0 + mu1 + mu2 - mu\r\n # dS = S0 + S1 + S2 -0.5 + .5/S\r\n dlnS = S * (S0 + S1 + S2 - 0.5) + .5\r\n return -lik, -np.hstack((dmu.flatten(), dlnS.flatten()))", "def Lasso(X0, Y, lam, w=np.array([0]), maxit=100, normalize=2):\n\n # Obtain size of X\n n, d = X0.shape\n X = np.zeros((n, d), dtype=np.complex64)\n Y = Y.reshape(n, 1)\n\n # Create w if none is given\n if w.size != d:\n w = np.zeros((d, 1), dtype=np.complex64)\n w_old = np.zeros((d, 1), dtype=np.complex64)\n\n # First normalize data\n if normalize != 0:\n Mreg = np.zeros((d, 1))\n for i in range(0, d):\n Mreg[i] = 1.0 / (np.linalg.norm(X0[:, i], normalize))\n X[:, i] = Mreg[i] * X0[:, i]\n else:\n X = X0\n\n # Lipschitz constant of gradient of smooth part of loss function\n L = np.linalg.norm(X.T.dot(X), 2)\n\n # Now loop until converged or max iterations\n for iters in range(0, maxit):\n\n # Update w\n z = w + iters / float(iters + 1) * (w - w_old)\n w_old = w\n z = z - X.T.dot(X.dot(z) - Y) / L\n for j in range(d):\n w[j] = np.multiply(np.sign(z[j]), np.max([abs(z[j]) - lam / L, 0]))\n\n # Could put in some sort of break condition based on convergence here.\n\n # Now that we have the sparsity pattern, used least squares.\n biginds = np.where(w != 0)[0]\n 
if biginds != []: w[biginds] = np.linalg.lstsq(X[:, biginds], Y)[0]\n\n # Finally, reverse the regularization so as to be able to use with raw data\n if normalize != 0:\n return np.multiply(Mreg, w)\n else:\n return w", "def anl_solution(self):\r\n\r\n m = float(self.mass) / self.nu_m\r\n qe = 1 / self.nu_m * (self.nu_t * self.nu_t / self.nu_x) * 1.0 \\\r\n / float(self.size_tick * self.size_tick)\r\n print 'qE=', qe\r\n c = self.light_vel\r\n for i in range(0, len(self.obs.obt_g)):\r\n ddt = float(self.obs.obt[i] - self.obs.obt[i - 1])\r\n x = m * c ** 2 / qe * (math.sqrt(1.0 + (qe * self.t[i] / (m\r\n * c)) ** 2) - 1.0)\r\n self.xa_track.append(x)\r\n p = qe * self.t[i]\r\n self.pa.append(p)\r\n v = p / math.sqrt(m ** 2 + (p / c) ** 2)\r\n jv = self.t[i] * qe / (m * c)\r\n v = math.sqrt(jv * jv / (1 + jv * jv)) * c\r\n self.va.append(v)\r\n print 'Analytical solution of the differential equation of motion'", "def write_ldl_lsolve(f, variables):\n\n data = variables['data']\n priv = variables['priv']\n Lp = priv['L']['p']\n\n f.write(\"void LDL_lsolve(LDL_int n, c_float X [ ], LDL_int Lp [ ]\")\n f.write(\", LDL_int Li [ ], c_float Lx [ ]){\\n\")\n f.write(\"LDL_int p;\\n\")\n\n # Unroll for loop\n for j in range(data['m'] + data['n']):\n if Lp[j+1] > Lp[j]: # Write loop ONLY if necessary\n f.write(\"for (p = %i ; p < %i ; p++){\\n\" % (Lp[j], Lp[j+1]))\n f.write(\"X [Li [p]] -= Lx [p] * X [%i];\\n\" % (j))\n f.write(\"}\\n\")\n\n # Close function\n f.write(\"}\\n\\n\")", "def dLdp(C1s,C0s,ks,bs,sigma=1):\n # return np.array(jit(jacfwd(L,argnums=1))(q,ps,C1s,C0s,ks,bs,sigma))\n \n # A = FIM(q,ps,C1s,C0s,ks,bs,sigma)\n \n # Construct A(q,ps)\n A = FIM(C1s,C0s,ks,bs,sigma)\n\n # Construct dAdp(q,ps)\n dAdp = jit(jacfwd(A,argnums=1))\n \n # Construct inv_A(q,ps)\n inv_A=lambda q,ps: jnp.linalg.inv(A(q,ps))\n \n # print(np.trace(-dAinv(inv_A,dAdp),axis1=0,axis2=1)-np.array(jit(jacfwd(L,argnums=1))(q,ps,C1s,C0s,ks,bs,sigma)))\n \n # Construct dLdP(q,ps)\n\n\n\n return lambda q,ps: -np.array(jnp.trace(dAinv(inv_A(q,ps),dAdp(q,ps)),axis1=0,axis2=1))", "def test_sdp(self):\n a = sp.rand(100, 100, .1, random_state=1)\n a = a.todense()\n X = Variable(100, 100)\n obj = at.norm(X, \"nuc\") + at.norm(X-a, 'fro')\n p = Problem(Minimize(obj))\n p.solve(solver=\"SCS\")", "def case_spd_linsys(\n spd_matrix: Union[np.ndarray, scipy.sparse.spmatrix, linops.LinearOperator],\n rng: np.random.Generator,\n) -> problems.LinearSystem:\n return random_linear_system(rng=rng, matrix=spd_matrix)", "def fit(self, X):\n self._causal_order = None\n self._adjacency_matrices = None\n\n X = check_array(X)\n\n lingam_model = self._lingam_model\n if lingam_model is None:\n lingam_model = DirectLiNGAM()\n elif not isinstance(lingam_model, _BaseLiNGAM):\n raise ValueError(\"lingam_model must be a subclass of _BaseLiNGAM\")\n\n phis = self._ar_coefs\n thetas = self._ma_coefs\n order = self._order\n\n if phis is None or thetas is None:\n phis, thetas, order, residuals = self._estimate_varma_coefs(X)\n else:\n p = phis.shape[0]\n q = thetas.shape[0]\n residuals = self._calc_residuals(X, phis, thetas, p, q)\n\n model = lingam_model\n model.fit(residuals)\n\n psis, omegas = self._calc_psi_and_omega(\n model.adjacency_matrix_, phis, thetas, order\n )\n\n if self._prune:\n ee = np.dot(\n np.eye(model.adjacency_matrix_.shape[0]) - model.adjacency_matrix_,\n residuals.T,\n ).T\n psis, omegas = self._pruning(X, ee, order, model.causal_order_)\n\n self._ar_coefs = phis\n self._ma_coefs = thetas\n self._order = order\n self._residuals = 
residuals\n\n self._causal_order = model.causal_order_\n self._adjacency_matrices = (psis, omegas)\n\n return self", "def experiment_linear_l1(_):\n # Attack epsilon is manually set according to the norm of the min-norm\n # solution found using cvxpy for d/n=10. That is max-margin=1/min-norm.\n # Min linf-norm solution found (norm=0.0422)\n # Min l2-norm solution found (norm=0.3411)\n # Min l1-norm solution found (norm=1.8497)\n # Min l4-norm solution found (norm=0.0002)\n # Min l1.5-norm solution found (norm=0.5274)\n return experiment_linear_lp(\n adv_norm_type='l1',\n dual_norm_type='linf',\n baseline_norm_types=['l2'],\n attack_step_dir='grad_max')", "def optimize_linear(grads, eps, ordr):\n\n red_ind = list(range(1, len(grads.size())))\n azdiv = torch.tensor(1e-12, dtype=grads.dtype, device=grads.device)\n\n if ordr == np.inf:\n opt_pert = torch.sign(grads)\n\n elif ordr == 1:\n abs_grad = torch.abs(grads)\n sign = torch.sign(grads)\n ori_shape = [1] * len(grads.size())\n ori_shape[0] = grads.size(0)\n\n max_abs_grad, _ = torch.max(abs_grad.view(grads.size(0), -1), 1)\n max_mask = abs_grad.eq(max_abs_grad.view(ori_shape)).float()\n num_ties = max_mask\n for red_scalar in red_ind:\n num_ties = torch.sum(num_ties, red_scalar, keepdims=True)\n opt_pert = sign * max_mask / num_ties\n # TODO tests\n\n elif ordr == 2:\n # TODO\n square = torch.max(azdiv, torch.sum(grads ** 2, red_ind, keepdim=True))\n opt_pert = grads / torch.sqrt(square)\n # TODO tests\n else:\n raise NotImplementedError('Only L-inf, L1 and L2 norms are '\n 'currently implemented.')\n\n scaled_pert = eps * opt_pert\n return scaled_pert", "def laplacian( graph : SpatialGraph, \n sparse : bool = False\n ) -> Union[np.ndarray, sp.spmatrix] :\n adj = adjacency(graph, sparse=sparse)\n dgr = sp.diags(np.array(adj.sum(1))) if sparse else np.diag(np.array(adj.sum(1)))\n return adj - dgr", "def LML(self, theta):\n t = [exp(h) for h in theta]\n a = t[0]\n s = array(t[1:])\n K_xx = self.build_covariance(a, s*self.scale_lengths)\n\n try: # protection against singular matrix error crash\n sgn, ldet = slogdet(K_xx)\n if sgn is -1: print(' # WARNING # - negative determinant')\n L = dot( self.y.T, solve( K_xx, self.y ) ) + ldet\n except:\n L = 1e50\n return L", "def LDL(A, d):\n n = shape(A)[0]\n L = array(eye(n))\n dg = zeros(n)\n dg[0] = A[0, 0]\n for k in range(1, n):\n m = reshape(array(A[:k, k].copy()), k)\n rforwardsolve(L[:k, :k], m, d)\n L[k, :k] = m/dg[:k]\n dg[k] = A[k, k] - dot(L[k, :k], m)\n return L, dg", "def solve_l1(y, A_fun, AT_fun, lambda_l1, reshape_img_fun, show_img_progress=False, alpha=0.2, max_iter=100, solver_tol=1e-6):\n\n\n obj_lss = np.zeros(max_iter)\n x_zs = np.zeros(max_iter)\n u_norms = np.zeros(max_iter)\n times = np.zeros(max_iter)\n\n ATy = AT_fun(y)\n x_shape = ATy.shape\n d = np.prod(x_shape)\n\n def A_cgs_fun(x):\n x = np.reshape(x, x_shape, order='F')\n y = AT_fun(A_fun(x)) + alpha * x\n return vec(y)\n A_cgs = LinearOperator((d,d), matvec=A_cgs_fun, dtype='float')\n\n def compute_p_inv_A(b, z0):\n (z,info) = sp.sparse.linalg.cgs(A_cgs, vec(b), x0=vec(z0), tol=1e-3, maxiter=100)\n if info > 0:\n print('cgs convergence to tolerance not achieved')\n elif info <0:\n print('cgs gets illegal input or breakdown')\n z = np.reshape(z, x_shape, order='F')\n return z\n\n\n def A_cgs_fun_init(x):\n x = np.reshape(x, x_shape, order='F')\n y = AT_fun(A_fun(x))\n return vec(y)\n A_cgs_init = LinearOperator((d,d), matvec=A_cgs_fun_init, dtype='float')\n\n def compute_init(b, z0):\n (z,info) = 
sp.sparse.linalg.cgs(A_cgs_init, vec(b), x0=vec(z0), tol=1e-2)\n if info > 0:\n print('cgs convergence to tolerance not achieved')\n elif info <0:\n print('cgs gets illegal input or breakdown')\n z = np.reshape(z, x_shape, order='F')\n return z\n\n # initialize z and u\n z = compute_init(ATy, ATy)\n u = np.zeros(x_shape)\n\n\n plot_normalozer = matplotlib.colors.Normalize(vmin=0.0, vmax=1.0, clip=True)\n\n\n start_time = timeit.default_timer()\n\n for iter in range(max_iter):\n\n # x-update\n net_input = z+u\n Wzu, wbook = wavelet_transform(net_input)\n q = soft_threshold(Wzu, lambda_l1/alpha)\n x = inverse_wavelet_transform(q, wbook, x_shape)\n x = np.reshape(x, x_shape)\n\n # z-update\n b = ATy + alpha * (x - u)\n z = compute_p_inv_A(b, z)\n\n # u-update\n u += z - x;\n\n if show_img_progress == True:\n\n fig = plt.figure('current_sol')\n plt.gcf().clear()\n fig.canvas.set_window_title('iter %d' % iter)\n plt.subplot(1,3,1)\n plt.imshow(reshape_img_fun(np.clip(x, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('x')\n plt.subplot(1,3,2)\n plt.imshow(reshape_img_fun(np.clip(z, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('z')\n plt.subplot(1,3,3)\n plt.imshow(reshape_img_fun(np.clip(net_input, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('netin')\n plt.pause(0.00001)\n\n\n obj_ls = 0.5 * np.sum(np.square(y - A_fun(x)))\n x_z = np.sqrt(np.mean(np.square(x-z)))\n u_norm = np.sqrt(np.mean(np.square(u)))\n\n print('iter = %d: obj_ls = %.3e |x-z| = %.3e u_norm = %.3e' % (iter, obj_ls, x_z, u_norm))\n\n\n obj_lss[iter] = obj_ls\n x_zs[iter] = x_z\n u_norms[iter] = u_norm\n times[iter] = timeit.default_timer() - start_time\n\n if x_z < solver_tol:\n break\n\n infos = {'obj_lss': obj_lss, 'x_zs': x_zs, 'u_norms': u_norms,\n 'times': times, 'alpha':alpha, 'lambda_l1':lambda_l1,\n 'max_iter':max_iter, 'solver_tol':solver_tol}\n\n\n return (x, z, u, infos)", "def _ls_solver(A, B, warm_start=None):\n # TODO - do conjugate gradient if n is too large\n return np.linalg.lstsq(A.T, B.T)[0].T", "def experiment_linear_linf(_):\n # Attack epsilon is manually set according to the norm of the min-norm\n # solution found using cvxpy for d/n=10. 
That is max-margin=1/min-norm.\n # Min linf-norm solution found (norm=0.0422)\n # Min l2-norm solution found (norm=0.3411)\n # Min l1-norm solution found (norm=1.8497)\n # Min l4-norm solution found (norm=0.0002)\n # Min l1.5-norm solution found (norm=0.5274)\n return experiment_linear_lp(\n adv_norm_type='linf',\n dual_norm_type='l1',\n baseline_norm_types=['l2'],\n attack_step_dir='sign_grad')", "def calc_lampam_sym(ss, constraints):\n if isinstance(ss, list):\n lampam = np.zeros((len(ss), 12), float)\n for index in range(len(ss)):\n lampam[index] = calc_lampam_sym(ss[index], constraints)\n return lampam\n if ss.ndim == 2 and ss.shape[0] > 1:\n lampam = np.zeros((ss.shape[0], 12), float)\n for index in range(ss.shape[0]):\n lampam[index] = calc_lampam_sym(ss[index], constraints)\n return lampam\n\n n_plies_in_panels = 2 * np.size(ss) # laminate ply count\n\n cos_sin = np.empty((4, n_plies_in_panels // 2), float)\n for ind in range(n_plies_in_panels // 2):\n cos_sin[:, ind] = constraints.cos_sin[\n constraints.ind_angles_dict[ss[ind]]].reshape((4, ))\n\n for_the_top = np.arange(n_plies_in_panels // 2)\n z_0 = np.ones(n_plies_in_panels // 2)\n z_2 = ((1 - n_plies_in_panels / 2) * z_0 + for_the_top) ** 3 \\\n - ((1 - n_plies_in_panels / 2) * z_0 + for_the_top - 1) ** 3\n lampam = np.array([\n (2 / n_plies_in_panels)*np.matmul(cos_sin, z_0),\n np.array([0, 0, 0, 0]),\n (8 / n_plies_in_panels**3)*np.matmul(cos_sin, z_2)]).reshape(12)\n return lampam", "def generate_direct_solver(self, grid=None):\n if grid is None:\n # LOG.debug(\"Generate Solver for internal Spare Matrix: %s\" % self.sp_matrix)\n solver = spla.factorized(self.sp_matrix)\n else:\n # LOG.debug(\"Generate Solver for given Grid %s\" % (grid,))\n sp_matrix = self.to_sparse_matrix(grid, \"csc\")\n # LOG.debug(\" with Sparse Matrix: %s\" % sp_matrix.todense())\n # print(\"Jahier\\n\", sp_matrix.todense())\n # print(\"Jahier.shape\\n\", sp_matrix.todense().shape)\n solver = spla.factorized(sp_matrix)\n return solver", "def Schechter_L(L, phi_s, L_s, alpha):\n\treturn phi_s * (L / L_s)**alpha * n.e**(-L / L_s) / L_s", "def calculate_latent(self, theta, Xm):\n n_samples, n_visible = Xm.shape\n log_p_y_given_x_unnorm = np.empty((self.n_hidden, n_samples, self.dim_hidden))\n memory_size = float(n_samples * n_visible * self.n_hidden * self.dim_hidden * 64) / 1000**3 # GB\n batch_size = np.clip(int(self.ram * n_samples / memory_size), 1, n_samples)\n for l in range(0, n_samples, batch_size):\n log_marg_x = self.calculate_marginals_on_samples(theta, Xm[l:l+batch_size]) # LLRs for each sample, for each var.\n log_p_y_given_x_unnorm[:, l:l+batch_size, :] = self.log_p_y + np.einsum('ikl,ijkl->ijl', self.alpha, log_marg_x, optimize=False)\n return self.normalize_latent(log_p_y_given_x_unnorm)", "def GroupLassoADMM(As, bs, lam, groups, rho, alpha, maxiter=1000, abstol=1e-4, reltol=1e-2):\n \n n,D = As[0].shape\n m = len(As)\n \n Atbs = [A.T.dot(b) for (A,b) in zip(As,bs)]\n \n Ls = [Factor(A,rho) for A in As]\n Us = [L.T for L in Ls]\n \n x = np.zeros((m*D,1))\n z = 1e-5*np.random.randn(m*D,1)\n u = 1e-5*np.random.randn(m*D,1)\n \n # Indices of x for each timestep. 
x[Ts[t]] is the coefficient vector for time t\n Ts = [j*D + np.arange(D) for j in range(m)]\n \n history = {}\n history['objval'] = []\n history['gl_objval'] = []\n history['r_norm'] = []\n history['s_norm'] = []\n history['eps_pri'] = []\n history['eps_dual'] = []\n \n for k in range(maxiter):\n \n # x update\n for j in range(m):\n q = Atbs[j] + rho*(z[Ts[j]]-u[Ts[j]])\n\n if n >= D:\n x[Ts[j]] = Solve(Us[j],Solve(Ls[j],q))\n else:\n x[Ts[j]] = q/rho-As[j].T.dot(Solve(Us[j],Solve(Ls[j],As[j].dot(q))))/rho**2\n \n # z update\n zold = np.copy(z)\n x_hat = alpha*x+(1-alpha)*zold\n for g in groups:\n z[g] = Shrinkage(x_hat[g]+u[g], lam/rho)\n \n u = u+(x_hat-z)\n \n # record history\n history['objval'].append(ObjectiveADMM(As,bs,Ts,lam,groups,x,z))\n history['gl_objval'].append(ObjectiveGLASSO_block(As,bs,Ts,lam,groups,x))\n history['r_norm'].append(Norm(x-z))\n history['s_norm'].append(Norm(rho*(z-zold)))\n history['eps_pri'].append(np.sqrt(m)*abstol+reltol*np.max([Norm(x),Norm(z)]))\n history['eps_dual'].append(np.sqrt(m)*abstol+reltol*Norm(rho*u))\n \n # check for termination\n if (history['r_norm'][-1] < history['eps_pri'][-1]) and \\\n (history['s_norm'][-1] < history['eps_dual'][-1]):\n break\n \n # Return unbiased sparse predictor\n z = z.reshape(D,m, order = 'F')\n nz_coords = np.where(np.sum(abs(z), axis = 1) != 0)[0]\n if len(nz_coords) != 0: \n for j in range(m):\n z[nz_coords,j] = np.linalg.lstsq(As[j][:, nz_coords], bs[j])[0][:,0]\n \n return z, history", "def fit(self, adjacency: Union[sparse.csr_matrix, np.ndarray]) -> 'Spectral':\n adjacency = check_format(adjacency).asfptype()\n check_square(adjacency)\n check_symmetry(adjacency)\n n = adjacency.shape[0]\n\n solver = set_solver(self.solver, adjacency)\n n_components = 1 + check_n_components(self.n_components, n-2)\n\n regularize: bool = not (self.regularization is None or self.regularization == 0.)\n check_scaling(self.scaling, adjacency, regularize)\n\n weights = adjacency.dot(np.ones(n))\n regularization = self.regularization\n if regularization:\n if self.relative_regularization:\n regularization = regularization * weights.sum() / n ** 2\n weights += regularization * n\n\n # Spectral decomposition of the normalized adjacency matrix\n weights_inv_sqrt_diag = diag_pinv(np.sqrt(weights))\n\n if regularization:\n norm_adjacency = NormalizedAdjacencyOperator(adjacency, regularization)\n else:\n norm_adjacency = weights_inv_sqrt_diag.dot(adjacency.dot(weights_inv_sqrt_diag))\n\n solver.which = 'LA'\n solver.fit(matrix=norm_adjacency, n_components=n_components)\n eigenvalues = solver.eigenvalues_\n index = np.argsort(-eigenvalues)[1:] # skip first eigenvalue\n eigenvalues = eigenvalues[index]\n eigenvectors = weights_inv_sqrt_diag.dot(solver.eigenvectors_[:, index])\n\n embedding = eigenvectors.copy()\n\n if self.scaling:\n eigenvalues_inv_diag = diag_pinv((1 - eigenvalues) ** self.scaling)\n embedding = eigenvalues_inv_diag.dot(embedding.T).T\n\n if self.normalized:\n embedding = normalize(embedding, p=2)\n\n self.embedding_ = embedding\n self.eigenvalues_ = eigenvalues\n self.eigenvectors_ = eigenvectors\n self.regularization_ = regularization\n\n return self", "def fit_LuEd(self, wl, Ls, Lu, Ed, params, weights, verbose=True):\n\n\t\t\tdef min_funct(params):\n\t\t\t\tp = params.valuesdict() \n\t\t\t\n\t\t\t\tRrs_modelled, Rrs_refl, Lu_Ed_modelled = self.model(beta = p['beta'], alpha = p['alpha'], am = p['am'], rh = p['rh'], pressure = p['pressure'], C_chl = p['C_chl'], C_sm = p['C_sm'], C_mie = p['C_mie'], n_mie = p['n_mie'], 
C_y = p['C_y'], S_y = p['S_y'], T_w = p['T_w'], theta_sun = p['theta_sun'], theta_view = p['theta_view'], n_w = p['n_w'], rho_s = p['rho_s'], rho_dd = p['rho_dd'], rho_ds = p['rho_ds'], delta = p['delta'], wl = wl, a_w = self.spectra['a_w'].values, daw_dT = self.spectra['daw_dT'].values, astar_ph = self.spectra['astar_ph'].values, astar_y = self.spectra['astar_y'].values, Ls_Ed = Ls/Ed)\n\n\t\t\t\tRrs_obs = Lu/Ed - Rrs_refl\n\n\t\t\t\t# Least squares\n\t\t\t\tresid = np.sum((Lu_Ed_modelled - Lu/Ed)**2 * weights)\n\n\t\t\t\treturn resid, Rrs_modelled, Rrs_refl, Lu_Ed_modelled, Rrs_obs\n\n\t\t\tstart_time = time.time()\n\n\t\t\treg = lm.minimize(lambda x: min_funct(x)[0], params=params, method='lbfgsb', options={'disp': verbose, 'gtol': 1e-16, 'eps': 1e-07, 'maxiter': 15000, 'ftol': 1e-16, 'maxls': 20, 'maxcor': 20}) \n\n\t\t\tprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\n\t\t\tresid, Rrs_modelled, Rrs_refl, Lu_Ed_modelled, Rrs_obs = min_funct(reg.params)\n\t\t\treg.params.add('resid', resid, False, 0.0, 100, None)\n\n\t\t\treturn reg, Rrs_modelled, Rrs_refl, Lu_Ed_modelled, Rrs_obs", "def update_latent(self, variable_mat, weight_mat, output_mat, y_list):\n new_latent = {k: np.zeros(self.H_mat[k].shape) for k in self.H_mat}\n y_sigma = [np.array([self.margin ** -1.0 if i == 1.0 else 4.0\n for i in y])\n for y in y_list]\n\n new_latent['sigma'] = np.linalg.inv(\n np.diag([self.sigma_h ** -1.0 for _ in range(self.R)])\n + reduce(lambda x, y: x + y,\n [(np.outer(weight_mat['mu'][1:, i],\n weight_mat['mu'][1:, i])\n + weight_mat['sigma'][i][1:, 1:])\n / (np.prod(y_sigma[0]) ** (1.0 / self.sample_count))\n for i in range(self.task_count)])\n )\n\n new_latent['mu'] = np.dot(\n new_latent['sigma'],\n np.dot(variable_mat['mu'].transpose(),\n self.kernel_mat) / self.sigma_h\n + reduce(\n lambda x, y: x + y,\n [(np.outer(weight_mat['mu'][1:, i], output_mat['mu'][i, :])\n - np.repeat(a=np.array([\n [x * weight_mat['mu'][0, i] + y for x, y in\n zip(weight_mat['mu'][1:, i],\n weight_mat['sigma'][i, 1:, 0])]]\n ), repeats=self.sample_count, axis=0).transpose())\n / y_sigma[i]\n for i in range(self.task_count)]\n )\n )\n\n return new_latent", "def gel_solve(\n A,\n y,\n l_1,\n l_2,\n ns,\n b_init=None,\n block_solve_fun=block_solve_agd,\n block_solve_kwargs=None,\n max_cd_iters=None,\n rel_tol=1e-6,\n Cs=None,\n Is=None,\n verbose=False,\n):\n p = len(A)\n m = len(y)\n device = A[0].device\n dtype = A[0].dtype\n y = y.to(device, dtype)\n if block_solve_kwargs is None:\n block_solve_kwargs = dict()\n\n # Create initial values if not specified.\n if b_init is None:\n b_init = 0.0, torch.zeros(p, max(ns), device=device, dtype=dtype)\n\n if not isinstance(ns, torch.Tensor):\n ns = torch.tensor(ns)\n sns = ns.to(device, dtype).sqrt()\n a_1 = l_1 * sns\n ma_1 = m * a_1\n a_2 = 2 * l_2 * sns\n b_0, B = b_init\n b_0_prev, B_prev = b_0, B\n k = 1 # iteration number\n pbar_stats = {} # stats for the outer progress bar\n pbar = tqdm.tqdm(\n desc=\"Solving gel with CD (l_1 {:.2g}, l_2 {:.2g})\".format(l_1, l_2),\n disable=not verbose,\n )\n\n while True:\n # First minimize with respect to b_0. This has a closed form solution\n # given by b_0 = 1'@(y - sum_j A_j@b_j) / m.\n b_0 = (y - sum(A[j] @ B[j, : ns[j]] for j in range(p))).sum() / m\n\n # Now, minimize with respect to each b_j.\n for j in tqdm.trange(\n p, desc=\"Solving individual blocks\", disable=not verbose, leave=False\n ):\n r_j = y - b_0 - sum(A[k] @ B[k, : ns[k]] for k in range(p) if k != j)\n\n # Check if b_j must be set to 0. 
The condition is ||A_j'@r_j|| <=\n # m*a_1.\n if (A[j].t() @ r_j).norm(p=2) <= ma_1[j]:\n B[j] = 0\n else:\n # Otherwise, minimize. First make sure initial value is not 0.\n if len((B[j, : ns[j]].abs() < 1e-6).nonzero()) == ns[j]:\n B[j, : ns[j]] = 1e-3\n\n # Add C_j and I_j to the arguments if using Newton's method.\n if block_solve_fun is block_solve_newton:\n block_solve_kwargs[\"C_j\"] = Cs[j]\n block_solve_kwargs[\"I_j\"] = Is[j]\n\n B[j, : ns[j]] = block_solve_fun(\n r_j,\n A[j],\n a_1[j].item(),\n a_2[j].item(),\n m,\n B[j, : ns[j]],\n verbose=verbose,\n **block_solve_kwargs,\n )\n\n # Compute relative change in b.\n b_0_diff = b_0 - b_0_prev\n B_diff = B - B_prev\n delta_norm = (b_0_diff ** 2 + (B_diff ** 2).sum()).sqrt()\n b_norm = (b_0 ** 2 + (B ** 2).sum()).sqrt()\n\n pbar_stats[\"rel change\"] = \"{:.2g}\".format(delta_norm.item() / b_norm.item())\n pbar.set_postfix(pbar_stats)\n pbar.update()\n\n # Check max iterations exit criterion.\n if max_cd_iters is not None and k == max_cd_iters:\n break\n k += 1\n\n # Check tolerance exit criterion.\n if delta_norm.item() <= rel_tol * b_norm.item() and k > 2:\n break\n b_0_prev, B_prev = b_0, B\n\n pbar.close()\n return b_0.item(), B", "def optimize(self):\n\n self.logger.info(\"Solving with Dynamic Slope Scaling Procedure in Julia :\")\n optimization_start = time.time()\n\n # 1. Preprocess for old network graph\n if self.old_network_graph is not None:\n\n # DSSP on old network\n old_network_obj = sum(list(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).values()))-1e-5\n try:\n self.check_infeasibility(self.old_network_graph, old_network_obj)\n except DHCOptimizerException as e:\n e.data = \"Invalid existing network: \" + e.data\n raise e\n\n flows, obj_val = self.optimize_with_dssp_julia(self.old_network_graph, old_network_obj, set())\n self.logger.info(\"Optimization phase time: %.2fs\" % (time.time() - optimization_start))\n solution_old_graph = self.build_solution_graph(self.old_network_graph, flows)\n\n if self.modify_old_network:\n\n # Add max capacity on old edges\n self.old_capacity = deepcopy(flows)\n old_buildings = list(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).values())\n for key in flows:\n if (key[1],key[0],0) not in self.old_capacity and key[1] not in old_buildings:\n self.old_capacity[(key[1],key[0],0)] = self.old_capacity[key]\n\n # Add Imaginary edges\n for edge in self.old_capacity:\n if self.optimization_graph.has_edge(*edge):\n\n # add nodes\n if not self.optimization_graph.has_node(config.IM_PREFIX+edge[0]):\n self.optimization_graph.add_node(config.IM_PREFIX+edge[0])\n self.optimization_graph.nodes[config.IM_PREFIX+edge[0]][config.GPD_GEO_KEY] = \\\n self.optimization_graph.nodes[edge[0]][config.GPD_GEO_KEY]\n if not self.optimization_graph.has_node(config.IM_PREFIX+edge[1]):\n self.optimization_graph.add_node(config.IM_PREFIX+edge[1])\n self.optimization_graph.nodes[config.IM_PREFIX+edge[1]][config.GPD_GEO_KEY] = \\\n self.optimization_graph.nodes[edge[1]][config.GPD_GEO_KEY]\n # add edges\n if not self.optimization_graph.has_edge(edge[0],config.IM_PREFIX+edge[0]):\n self.optimization_graph.add_edge(edge[0],config.IM_PREFIX+edge[0])\n if not self.optimization_graph.has_edge(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1]):\n self.optimization_graph.add_edge(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1])\n if not self.optimization_graph.has_edge(config.IM_PREFIX+edge[1],edge[1]):\n 
self.optimization_graph.add_edge(config.IM_PREFIX+edge[1],edge[1])\n\n # put cost\n self.optimization_graph.edges[(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1],0)][config.EDGE_COST_KEY] = \\\n self.optimization_graph.edges[(edge[0],edge[1],0)][config.EDGE_COST_KEY]\n self.optimization_graph.edges[(edge[0],edge[1],0)][config.EDGE_COST_KEY] = 1e-5\n self.optimization_graph.edges[(edge[0],config.IM_PREFIX+edge[0],0)][config.EDGE_COST_KEY] = 1e-5\n self.optimization_graph.edges[(config.IM_PREFIX+edge[1],edge[1],0)][config.EDGE_COST_KEY] = 1e-5\n\n else:\n # if we don't modify the old network, we have to change the capacity of the supplies\n already_consummed = {}\n for edge in solution_old_graph.edges():\n if solution_old_graph.nodes[edge[0]].get(config.NODE_TYPE_KEY) == config.SUPPLY_NODE_TYPE:\n already_consummed[edge[0]] = already_consummed.get(edge[0], 0) + \\\n solution_old_graph.edges[edge][config.SOLUTION_POWER_FLOW_KEY]\n for source in already_consummed:\n if already_consummed[source] <= self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY]:\n self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] -= already_consummed[source]\n self.network_objective -= already_consummed[source]\n else:\n self.network_objective -= self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY]\n self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] = 0\n\n # Remove edges from old network\n edges_to_remove = set()\n for e in self.optimization_graph.edges():\n if self.old_network_graph.has_edge(*e) or self.old_network_graph.has_edge(e[1],e[0]):\n edges_to_remove.add(e)\n self.optimization_graph.remove_edges_from(edges_to_remove)\n\n # Remove isolated buildings of optimization graph\n isolated_to_remove = set()\n for e in self.old_network_graph.edges():\n if e[0] in self.old_network_graph.nodes() and \\\n self.optimization_graph.nodes[e[1]].get(config.NODE_TYPE_KEY) == config.BUILDING_NODE_TYPE:\n isolated_to_remove.add(e)\n self.optimization_graph.remove_edges_from(isolated_to_remove)\n\n # Remove buildings from old network\n for n, data in self.old_network_graph.nodes(data=True):\n if data.get(config.NODE_TYPE_KEY) == config.BUILDING_NODE_TYPE:\n self.optimization_graph.remove_node(n)\n\n # Re-link sources\n sources = set()\n for n, data in self.optimization_graph.nodes(data=True):\n if data.get(config.NODE_TYPE_KEY) == config.SUPPLY_NODE_TYPE:\n sources.add(n)\n source_graph = self.optimization_graph.subgraph(sources).copy()\n self.optimization_graph.remove_nodes_from(sources)\n gnx.remove_isolates(self.optimization_graph)\n node_filter = lambda n: self.optimization_graph.nodes.get(n,{}).get(config.NODE_TYPE_KEY) != config.BUILDING_NODE_TYPE\n gnx.spatial_points_merge(self.optimization_graph, source_graph.nodes_to_gdf(), node_filter=node_filter, inplace=True)\n\n # fill missing information\n gnx.fill_edges_missing_geometry_attributes(self.optimization_graph)\n gnx.fill_length_attribute(self.optimization_graph, config.EDGE_LENGTH_KEY, only_missing=True)\n gnx.fill_length_attribute(self.optimization_graph, config.EDGE_COST_KEY, only_missing=True)\n for e in self.optimization_graph.edges(keys=True):\n self.optimization_graph.edges[e][config.LEASTCOST_COEF_KEY] = \\\n self.optimization_graph.edges[e].get(config.LEASTCOST_COEF_KEY,0)\n\n\n\n # 2. 
Process the DSSP on optimization graph\n self.check_is_ready()\n self.check_infeasibility(self.optimization_graph, self.network_objective)\n\n if self.old_network_graph is not None and self.modify_old_network:\n old_buildings = set(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).keys())\n else:\n old_buildings = set()\n flows, obj_val = self.optimize_with_dssp_julia(self.optimization_graph, self.network_objective, old_buildings,postprocess= (not self.modify_old_network))\n self.logger.info(\"Optimization phase time: %.2fs\" % (time.time() - optimization_start))\n self.solution_graph = self.build_solution_graph(self.optimization_graph, flows, self.connected)\n\n # 3. Postprocess for old network graph\n if self.old_network_graph is not None:\n \n if self.modify_old_network:\n # Put the right supply capacity and cost\n for edge in self.old_capacity:\n if self.solution_graph.has_edge(edge[0],edge[1]):\n self.solution_graph.edges[(edge[0],edge[1])][config.EDGE_COST_KEY] = \\\n self.optimization_graph.edges[(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1],0)][config.EDGE_COST_KEY]\n \n # Remove imaginary edges\n imaginary_nodes_to_remove = set()\n nodes_to_relabel = {}\n for edge in self.solution_graph.edges():\n if str(edge[0]).startswith(config.IM_PREFIX) and str(edge[1]).startswith(config.IM_PREFIX):\n real_edge = edge[0][len(config.IM_PREFIX):],edge[1][len(config.IM_PREFIX):]\n self.old_capacity[(real_edge[0], real_edge[1], 0)] = pd.np.inf\n self.old_capacity[(real_edge[1], real_edge[0], 0)] = pd.np.inf\n if not self.solution_graph.has_edge(*real_edge):\n for i in range(2):\n nodes_to_relabel[edge[i]] = real_edge[i]\n else:\n self.solution_graph.edges[real_edge[0],real_edge[1]][config.SOLUTION_POWER_FLOW_KEY] += \\\n self.solution_graph.edges[edge].get(config.SOLUTION_POWER_FLOW_KEY,0)\n imaginary_nodes_to_remove.add(edge[0])\n imaginary_nodes_to_remove.add(edge[1])\n elif str(edge[0]).startswith(config.IM_PREFIX):\n imaginary_nodes_to_remove.add(edge[0])\n elif str(edge[1]).startswith(config.IM_PREFIX):\n imaginary_nodes_to_remove.add(edge[1])\n\n nx.relabel_nodes(self.solution_graph, nodes_to_relabel, copy=False)\n self.solution_graph.remove_nodes_from(list(imaginary_nodes_to_remove))\n for node in nodes_to_relabel.values():\n if self.solution_graph.has_edge(node, node):\n self.solution_graph.remove_edge(node, node)\n\n else:\n for source in nx.get_node_attributes(self.solution_graph, config.SUPPLY_POWER_CAPACITY_KEY):\n self.solution_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] += already_consummed.get(source,0)\n self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] += already_consummed.get(source,0)\n\n return flows, obj_val", "def add_lp_le_DL_terms(K, lin_pot_mesh, lin_geo_mesh):\n c_0 = 1. / (4. 
* np.pi)\n num_faces = lin_pot_mesh.get_faces().shape[0]\n pot_faces = lin_pot_mesh.get_faces()\n pot_nodes = lin_pot_mesh.get_nodes()\n num_nodes = pot_nodes.shape[0]\n for face_num in range(num_faces): # integrate over faces\n face_nodes = lin_geo_mesh.get_tri_nodes(face_num)\n face_n = lin_geo_mesh.get_normal(face_num)\n face_hs = lin_geo_mesh.get_hs(face_num)\n for src_num in range(num_nodes): # source points\n src_pt = pot_nodes[src_num]\n is_singular, _local_singular_ind = lin_pot_mesh.check_in_face(src_num, face_num)\n if not is_singular: # regular triangle\n for node_num in range(3): # over 3 triangle nodes\n face_node_global_num = pot_faces[face_num, node_num] # global index for vert\n sub_mat = gq.int_over_tri_lin(\n make_DL_reg_lp_le_quad_func(\n face_n, src_pt, node_num\n ),\n face_nodes,\n face_hs\n )\n j = 3 * face_node_global_num\n K[(3 * src_num):(3 * src_num + 3),\n j:j + 3] += c_0 * sub_mat\n # subtracting the q(x_0) term\n sub_mat = gq.int_over_tri_lin(\n make_DL_cp_le_quad_func(face_n, src_pt),\n face_nodes,\n face_hs\n )\n K[(3 * src_num):(3 * src_num + 3), (3 * src_num):(3 * src_num + 3)] -= c_0 * sub_mat\n # for singular elements, \\hat{x} dotted with normal vector always zero\n # due to flat triangular elements\n\n for src_num in range(num_nodes): # source points\n # whole surface q(x_0) term\n j = 3 * src_num\n K[j:j+3, j:j+3] -= c_0 * (\n 4. * np.pi * np.identity(3)\n )", "def __solve_linear_problem(self, continuity_relaxation=True):\n result = [0] * self.layout_slots\n self.news_pool.sort(key=lambda x: (x.news_category, x.sampled_quality), reverse=True)\n LP_news_pool = []\n done_for_category = False\n category_count = 0\n prev_category = self.news_pool[0].news_category\n # First build a subset of news to easily handle the LP resolution\n for news in self.news_pool:\n if prev_category != news.news_category:\n if category_count < self.layout_slots:\n raise RuntimeWarning(\"Not enough news per category found. There should be at least \" +\n str(self.layout_slots) + \" news with category = \" + prev_category + \", but \"\n \"only \" + str(category_count) + \"are present. 
The allocation maybe \"\n \"sub-optimal.\")\n category_count = 0\n done_for_category = False\n prev_category = news.news_category\n if not done_for_category:\n LP_news_pool.append(news)\n category_count += 1\n if category_count == self.layout_slots:\n done_for_category = True\n\n # If not all the required news are present, add some other news at random.\n while len(LP_news_pool) < len(self.categories) * self.layout_slots:\n random_news = np.random.choice(self.news_pool)\n if random_news not in LP_news_pool:\n LP_news_pool.append(random_news)\n\n LP_news_pool.sort(key=lambda x: x.news_category, reverse=False)\n thetas = []\n # Compute the vector of coefficients for the LP objective function\n for news in LP_news_pool:\n thetas += [news.sampled_quality] * self.layout_slots\n self.C = list(np.array(thetas) * np.array(self.lambdas))\n\n # Then solve an LP or an ILP\n if continuity_relaxation:\n linear_problem = opt.linprog(A_ub=self.A, b_ub=self.B, c=self.C)\n slots_assegnation_probabilities = []\n slot_counter = 0\n tmp_slot_probabilities = []\n while slot_counter < self.layout_slots:\n i = slot_counter\n while i < len(linear_problem.x):\n tmp_slot_probabilities.append(np.abs(linear_problem.x[i]))\n i += self.layout_slots\n slots_assegnation_probabilities.append(tmp_slot_probabilities.copy())\n tmp_slot_probabilities.clear()\n slot_counter += 1\n\n self.measure_allocation_diversity_bounds_errors(slots_assegnation_probabilities, LP_news_pool, iter=10)\n\n result = self.__de_randomize_LP(LP_news_pool, slots_assegnation_probabilities, self.lp_rand_tech)\n\n else:\n # INITIALIZES AN INTEGER LINEAR PROBLEM\n ILP = LpProblem(\"News_ILP\", LpMaximize)\n ILP_variables = []\n\n for cat in range(len(self.categories)):\n for j in range(self.layout_slots):\n for s in range(self.layout_slots):\n ILP_variables.append(LpVariable(name=str(cat) + \"_\" + str(j) + \"_\" + str(s), lowBound=0, upBound=1, cat=\"Binary\"))\n\n # Objective function addition to the problem\n C = list(np.array(self.C) * -1)\n ILP += lpSum([C[i] * ILP_variables[i] for i in range(len(self.C))])\n\n # Category constraints addition to the problem\n for i in range(len(self.categories)):\n ILP += lpSum([self.A[i][j] * ILP_variables[j] for j in range(len(self.C))]) <= self.B[i]\n\n # Slots capacity constraints addition to the problem\n for i in range(len(self.categories), len(self.categories) + self.layout_slots):\n ILP += lpSum([self.A[i][j] * ILP_variables[j] for j in range(len(self.C))]) <= self.B[i]\n\n # News capacity constraints addition to the problem\n for i in range(len(self.categories) + self.layout_slots, len(self.categories) + self.layout_slots + len(self.categories) * self.layout_slots):\n ILP += lpSum([self.A[i][j] * ILP_variables[j] for j in range(len(self.C))]) <= self.B[i]\n\n ILP.solve()\n\n # FOR EACH SLOT, ISOLATES THE CORRESPONDING VARIABLES\n slots_assegnation_probabilities = []\n slot_counter = 0\n tmp_slot_probabilities = []\n while slot_counter < self.layout_slots:\n i = slot_counter\n while i < len(ILP.variables()):\n tmp_slot_probabilities.append(ILP.variables().__getitem__(i))\n i += self.layout_slots\n slots_assegnation_probabilities.append(tmp_slot_probabilities.copy())\n tmp_slot_probabilities.clear()\n slot_counter += 1\n\n # TAKES THE VARIABLES WHICH VALUE IS 1, THEN ALLOCATES THE CORRESPONDING NEWS IN THE RESULT PAGE\n for i in range(len(result)):\n for probabilities in slots_assegnation_probabilities[i]:\n if probabilities.varValue > 0:\n var_name = probabilities.name\n break\n indexes = 
var_name.split(\"_\")\n category_index = int(indexes[0])\n news_number = int(indexes[1])\n news_index = category_index * self.layout_slots + news_number\n result[i] = LP_news_pool[news_index]\n\n return result", "def mgas(sg,sp,gpotential,potential,xv,dt,kappa=1.,alpha=1.):\n lrp = lram(sg,sp,gpotential,xv,kappa)\n if lrp<sg.rh: \n dm = alpha *(sg.Mh-sg.M(lrp)) *dt/pr.tdyn(potential,xv[0],xv[2])\n m = max(sg.Mh-dm,cfg.Mres)\n else:\n m = sg.Mh\n return m,lrp", "def _stratified_model_admm(self, shape, Lap, loss_proximal_func, regulariser_proximal_func, graph_data=dict(), \\\n relative_tolerance=1e-5, absolute_tolerance=1e-5, num_jobs=4, \\\n max_cg_iters=10, max_iters=1000, rho=1, tau_decrement=2, tau_increment=2, mu=10, \\\n rho_min=0.1, rho_max=1.0):\n import multiprocessing as mp\n import scipy as sc\n optimal_solution = False\n n = np.prod(shape)\n m = Lap.shape[0]\n\n # Retrieve data from ``graph_data``\n # alpha_init\n if 'alpha_init' in graph_data:\n alpha = graph_data['alpha_init'].copy()\n else:\n alpha = np.zeros((m,) + shape)\n\n primal_residual = np.zeros(alpha.shape)\n primal_residual_tilde = np.zeros(alpha.shape)\n dual_residual = np.zeros(alpha.shape)\n dual_residual_tilde = np.zeros(alpha.shape)\n\n # alpha_tilde\n if 'alpha_tilde' in graph_data:\n alpha = graph_data['alpha_tilde'].copy()\n else:\n alpha_tilde = alpha.copy()\n # alpha_hat\n if 'alpha_hat' in graph_data:\n alpha_hat = graph_data['alpha_hat'].copy()\n else:\n alpha_hat = alpha.copy()\n # u\n if 'u' in graph_data:\n u = graph_data['u'].copy()\n else:\n u = np.zeros(alpha.shape)\n # u_tilde\n if 'u_tilde' in graph_data:\n u_tilde = graph_data['u_tilde'].copy()\n else:\n u_tilde = np.zeros(alpha.shape)\n\n # Multiprocessing\n if m <= num_jobs:\n num_jobs = m\n proximal_pool = mp.Pool(num_jobs)\n\n for iter_j in range(1, max_iters):\n\n # Update alpha\n alpha = loss_proximal_func(t=1./rho, nu=alpha_hat-u, warm_start=alpha, pool=proximal_pool)\n\n # Update alpha_tilde\n alpha_tilde = regulariser_proximal_func(t=1./rho, nu=alpha_hat-u_tilde, warm_start=alpha_tilde, \\\n pool=proximal_pool)\n\n # Update alpha_hat\n\n S = Lap + 2.0 * rho * sc.sparse.eye(m)\n M = sc.sparse.diags(1./S.diagonal() )\n indices = np.ndindex(shape)\n equ_rhs = rho * (alpha.T + alpha_tilde.T + u.T + u_tilde.T)\n\n for j, index in enumerate(indices):\n index_value = index[::-1]\n solution = sc.sparse.linalg.cg(S, equ_rhs[index_value], \\\n M=M, x0=alpha_hat.T[index_value], \\\n maxiter=max_cg_iters)\n solution = solution[0]\n dual_residual.T[index_value] = -rho * (solution - alpha_hat.T[index_value])\n dual_residual_tilde.T[index_value] = dual_residual.T[index_value]\n alpha_hat.T[index_value] = solution\n\n # Updates\n primal_residual = alpha - alpha_hat\n primal_residual_tilde = alpha_tilde - alpha_hat\n u += alpha - alpha_hat\n u_tilde += alpha_tilde - alpha_hat\n\n # Calculation of residual norms and epsilon values\n primal_residual_norm = np.linalg.norm(np.append(primal_residual, primal_residual_tilde), 2)\n dual_residual_norm = np.linalg.norm(np.append(dual_residual, dual_residual_tilde), 2)\n primal_eps = np.sqrt(2. * m * n) * absolute_tolerance + relative_tolerance * \\\n np.max([primal_residual_norm, dual_residual_norm])\n dual_eps = np.sqrt(2. 
* m * n) * absolute_tolerance + relative_tolerance * \\\n np.linalg.norm(rho * np.append(u, u_tilde))\n\n # Breaking condition!\n if primal_residual_norm <= primal_eps and \\\n dual_residual_norm <= dual_eps:\n optimal_solution = True\n break\n\n rho_update = rho\n if primal_residual_norm > mu * dual_residual_norm:\n rho_update = tau_increment * rho\n elif dual_residual_norm > mu * primal_residual_norm:\n rho_update = rho / tau_decrement\n rho_update = np.clip(rho_update, rho_min, rho_max)\n u *= rho / rho_update\n u_tilde *= rho / rho_update\n rho = rho_update\n\n proximal_pool.close()\n proximal_pool.join()\n output = {'alpha': alpha, \\\n 'alpha_tilde': alpha_tilde, \\\n 'alpha_hat': alpha_hat, \\\n 'u': u, \\\n 'u_tilde': u_tilde}\n\n # Complete later!\n result = {'iterations': iter_j, \\\n 'optimal' :optimal_solution}\n return output, result", "def solver_GP(self, eta, mu, lambda_, S, D, A_Init=None):\n\n epsOut = 1e-3\n maxIter = 200\n maxTrial = 20\n\n N, M = self.X.shape\n if mu == -1:\n nLabeled = np.sum(S + D) / 2\n nUnlabeled = N * (N - 1) / 2 - nLabeled\n mu = nLabeled / nUnlabeled\n\n if not A_Init:\n A_Init = M / (M + 1) * np.diag(np.ones(M))\n\n iterGP = 0\n A_pre = (A_Init + A_Init.T) / 2\n objL_pre = self.computeObj_GP(eta, mu, lambda_, S, D, A_pre)\n A_best = A_pre\n objL_best = objL_pre\n normOut = 1e10\n\n while (normOut > epsOut) and (iterGP < maxIter):\n iterGP += 1\n\n # compute gradient\n grad = self.computeGrad_GP(eta, mu, lambda_, S, D, A_pre)\n\n # choose step size\n deltak = 0.1 * M / (np.linalg.norm(grad, ord='fro') * np.sqrt(iterGP))\n deltakk = deltak\n\n trial = 0\n while trial < maxTrial:\n trial += 1\n\n # gradient ascent\n A = A_pre + np.dot(deltak, grad)\n\n # projection\n A = (A + A.T) / 2\n b, V = np.linalg.eig(A)\n\n b = b * (b > 0)\n A = np.dot(V @ np.diag(b), V.T)\n\n # print(A)\n objL = self.computeObj_GP(eta, mu, lambda_, S, D, A)\n\n if objL >= objL_pre: # success\n objL_pre = objL\n break\n else: # fail\n deltak = deltak / 2\n\n if trial == maxTrial: # fail\n print('GP update fails...\\n')\n A = A_pre + 2 * deltakk * grad\n A = (A + A.T) / 2\n b, V = np.linalg.eig(A)\n # b = np.diag(b)\n b = b * (b > 0)\n A = np.dot(V @ np.diag(b), V.T)\n\n objL = self.computeObj_GP(eta, mu, lambda_, S, D, A)\n\n elif objL >= objL_best: # beat best\n A_best = A\n objL_best = objL\n\n normOut = np.linalg.norm(A_pre - A, ord='fro')\n A_pre = A\n objL_pre = objL\n\n A_Final = A_best\n\n return A_Final, iterGP", "def solver_auto_param(u_init, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, eta_step = 0.5, eta_step_tumor = 0.99, ftol = 1e-3, max_iter = 300, verbose = 0, nnls_max_iter=30):\n auto_param_obj_history = []\n auto_param_relaxed_obj_history = []\n \n eta_0 = (1/(2*np.max(B)))*0.5 #Initialize eta_0\n eta = np.array([eta_0/len(H)]*len(H))*0.9\n eta_lin = np.ones(L_lhs.shape[0])*0.01\n \n u, w_0, w, w_lin, obj_history, relaxed_obj_history = solver(u_init, eta_0, eta, eta_lin, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, ftol = ftol, max_iter = max_iter, verbose = verbose, nnls_max_iter=nnls_max_iter)\n # solver(u_init, eta_0, eta, T, H, alpha, gamma, B, D, C, ftol = 1e-3, max_iter = 300, verbose = verbose)\n auto_param_obj_history.append(obj_history)\n auto_param_relaxed_obj_history.append(relaxed_obj_history)\n cnstr = constraints_all(u, H, gamma, D, C, tol = 0.05, verbose = 0)\n cnstr_linear = linear_constraint(u, L_lhs, L_rhs, tol = 0.05)\n \n print('Enforcing Feasibility')\n count = 0\n num_violated = -1\n while (len(H) - cnstr['Relaxed'].sum() + (L_lhs.shape[0] 
- np.sum(cnstr_linear))):\n count += 1\n num_violated_prev = np.copy(num_violated)\n num_violated_oar = len(H) - cnstr['Relaxed'].sum()\n num_violated_lin = L_lhs.shape[0] - np.sum(cnstr_linear)#(1 - int(cnstr_linear))\n num_violated = len(H) - cnstr['Relaxed'].sum() + (L_lhs.shape[0] - np.sum(cnstr_linear))#(1 - int(cnstr_linear))\n \n print('Iter ', count, '# of violated constr:', len(H) - cnstr['Relaxed'].sum() + (L_lhs.shape[0] - np.sum(cnstr_linear)))\n print(' Linear constraints on u violation:', L_lhs.shape[0] - np.sum(cnstr_linear))\n eta[cnstr['Relaxed'] == False] *= eta_step\n eta_lin[cnstr_linear == False] *= eta_step\n # eta_0 *= eta_step*2\n # eta_lin *= eta_step\n \n if num_violated == num_violated_prev:\n print('Increase enforcement')\n if num_violated_lin > 0:\n eta_lin[cnstr_linear == False] *= eta_step\n # eta_0 *= eta_step*2\n #eta_lin *= eta_step\n if num_violated_oar > 0:\n eta[cnstr['Relaxed'] == False] *= eta_step\n # eta_0 *= eta_step*2\n \n u, w_0, w, w_lin, obj_history, relaxed_obj_history = solver(u, eta_0, eta, eta_lin, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, ftol = ftol, max_iter = max_iter, verbose = verbose, nnls_max_iter=nnls_max_iter)\n # solver(u, eta_0, eta, T, H, alpha, gamma, B, D, C, ftol = ftol, max_iter = max_iter, verbose = verbose)\n auto_param_obj_history.append(obj_history)\n auto_param_relaxed_obj_history.append(relaxed_obj_history)\n cnstr = constraints_all(u, H, gamma, D, C, tol = 0.05, verbose = 0)\n cnstr_linear = linear_constraint(u, L_lhs, L_rhs, tol = 0.05)\n \n print('Enforcing Optimality')\n count = 0\n while not (len(H) - cnstr['Relaxed'].sum() + (L_lhs.shape[0] - np.sum(cnstr_linear))):\n # (cnstr['Relaxed'].sum()-len(H)): #If nothing is violated -- enforce optimality!\n count += 1\n print('Opt Iter', count)\n obj_prev = obj_u_opt_N_fixed(u, T, alpha, B)\n u_prev = np.copy(u)\n eta_0 *= eta_step_tumor\n print('Current eta_0:', eta_0)\n if (2*eta_0)**2 <= 1e-80:\n print('zero reached')\n break\n # u, w_0, w, w_lin, obj_history, relaxed_obj_history = solver(u, eta_0, eta, T, H, alpha, gamma, B, D, C, ftol = ftol, max_iter = max_iter, verbose = verbose)\n u, w_0, w, w_lin, obj_history, relaxed_obj_history = solver(u, eta_0, eta, eta_lin, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, ftol = ftol, max_iter = max_iter//2, verbose = verbose, nnls_max_iter=nnls_max_iter)\n auto_param_obj_history.append(obj_history)\n auto_param_relaxed_obj_history.append(relaxed_obj_history)\n \n obj_new = obj_u_opt_N_fixed(u, T, alpha, B)\n if (abs(obj_new - obj_prev)/abs(obj_prev) <= 1e-4) or (obj_new > obj_prev): #two consequent iters, two times bc on iter 2 it stops anyway\n print('No improvement, increase enforcement')\n eta_step_tumor *= 0.1\n eta_0 *= eta_step_tumor\n if (2*eta_0)**2 <= 1e-80:\n print('zero reached')\n break\n # break\n \n cnstr = constraints_all(u, H, gamma, D, C, tol = 0.05, verbose = 0)\n cnstr_linear = linear_constraint(u, L_lhs, L_rhs, tol = 0.05)\n print('# of violated constr:', len(H) - cnstr['Relaxed'].sum() + (L_lhs.shape[0] - np.sum(cnstr_linear)))#(1 - int(cnstr_linear)))\n \n print('Finding the correct solution:')\n u = u_prev\n eta_0 = eta_0/eta_step_tumor\n \n cnstr = constraints_all(u, H, gamma, D, C, tol = 0.05, verbose = 0)\n cnstr_linear = linear_constraint(u, L_lhs, L_rhs, tol = 0.05)\n print('# of violated constr:', len(H) - cnstr['Relaxed'].sum() + (L_lhs.shape[0] - np.sum(cnstr_linear)))\n # print('# of violated constr:', cnstr['Relaxed'].sum()-len(H))\n print(\"OBJJJJJ:\", obj_u_opt_N_fixed(u, T, alpha, B))\n 
return u, w_0, w, w_lin, eta_0, eta, eta_lin, auto_param_obj_history, auto_param_relaxed_obj_history", "def NLSA(self,N=1,q=12,l=None):\n\n X = self.data[self.IDs]\n X = np.flip(X.T[(np.arange(q))+np.arange(np.max(self.dimT-(q-1),0)).reshape(-1,1)].reshape(self.dimT-q+1,self.n*q).T,0)\n K = np.zeros((self.dimT-q,self.dimT-q))\n for i in range(self.dimT-q):\n xi = np.atleast_2d(X[:,1+i])\n xi_m1 = np.atleast_2d(X[:,1+i-1])\n elli = cdist(xi,xi_m1,'euclidean')\n for j in range(self.dimT-q):\n xj = np.atleast_2d(X[:,1+j])\n xj_m1 = np.atleast_2d(X[:,1+j-1])\n ellj = cdist(xj,xj_m1,'euclidean')\n K[i,j] = np.exp(-cdist(xi,xj,'sqeuclidean')/(elli*ellj))\n\n Qi,Qj = np.meshgrid(np.sum(K,axis=1),np.sum(K,axis=1))\n K_tilde = K/(Qi*Qj)\n P = K_tilde/np.atleast_2d(np.sum(K_tilde,axis=1)).T #transition (probability) matrix\n L = np.eye(self.dimT-q) - P\n Lambda, phi = np.linalg.eig(L) #Lϕ = λϕ\n Z, mu = np.linalg.eig(P) #μP = μ\n mu = mu[:,np.isclose(Z,1,atol=1e-12)].ravel() #take eigenvector corresponding to where eigenvalue = 1.\n mu = mu / np.sum(mu) #to make the sum of μ equal to 1 (it is a vector of probabilities)\n \n if l is None:\n l = self.dimT-q\n else:\n l = l\n \n A = np.linalg.multi_dot([X[:,1:],np.diag(mu),phi[:,-l:]]) #project X onto leading l Laplacian eigenfunctions\n U,S,V = np.linalg.svd(A,full_matrices=False)\n\n EEOFs = np.zeros((self.dimX,self.dimY,self.dimT,N)) ; EEOFs[:,:,0,:] = np.nan \n X_rec = np.zeros((self.n,self.dimT,self.dimT-q)) ; X_rec[:,0,:] = np.nan\n #note that we set the first time stamp (i.e. year 1) to nan, as we have used this to compute the \n #phase velocities (elli,ellj). \n for k in range(self.dimT-q):\n Xk = S[k]*np.dot(np.atleast_2d(U[:,k]).T,np.atleast_2d(V.T[:,k]))\n offset1 = 0\n offset2 = 1\n for t in range(1,self.dimT):\n if t == 1:\n X_rec[:,t,k] = Xk[-self.n:,0]\n elif (t > 1) & (t < q):\n x_kj = np.zeros((self.n,t+1))\n start = self.n*q - (t+1)*self.n\n for l in range(t+1):\n x_kj[:,l] = Xk[start:start+self.n,l]\n start += self.n\n X_rec[:,t,k] = np.mean(x_kj,1)\n elif (t >= q) & (t <= self.dimT-q):\n x_kj = np.zeros((self.n,q))\n start = 0\n for l in range(offset1,q+offset1):\n x_kj[:,l-offset1] = Xk[start:start+self.n,l]\n start += self.n\n offset1 += 1\n X_rec[:,t,k] = np.mean(x_kj,1)\n elif (t > self.dimT-q) & (t < self.dimT-1):\n x_kj = np.zeros((self.n,q-offset2))\n start = 0\n for l in range(offset1,(q-offset2)+offset1):\n x_kj[:,l-offset1] = Xk[start:start+self.n,l]\n start += self.n\n offset1 += 1\n offset2 += 1\n X_rec[:,t,k] = np.mean(x_kj,1)\n elif t == self.dimT-1:\n X_rec[:,t,k] = Xk[:self.n,-1]\n\n EEOFs[self.IDs] = (np.flip(X_rec,0)/self.weights[self.IDs][:,np.newaxis,np.newaxis])[:,:,:N]\n \n return EEOFs", "def analyticalLinearSol(self, t):\n return self.c*t + self.I", "def sparsetriangularsolvedense(self,transposed_,lnzc,lptrc,lsubc,lvalc,b): # 3\n if not isinstance(transposed_,transpose): raise TypeError(\"Argument transposed has wrong type\")\n n_ = None\n if n_ is None:\n n_ = len(b)\n elif n_ != len(b):\n raise IndexError(\"Inconsistent length of array b\")\n if n_ is None:\n n_ = len(lnzc)\n elif n_ != len(lnzc):\n raise IndexError(\"Inconsistent length of array lnzc\")\n if n_ is None:\n n_ = len(lptrc)\n elif n_ != len(lptrc):\n raise IndexError(\"Inconsistent length of array lptrc\")\n if n_ is None: n_ = 0\n if lnzc is None: raise TypeError(\"Invalid type for argument lnzc\")\n if lnzc is None:\n lnzc_ = None\n else:\n try:\n lnzc_ = memoryview(lnzc)\n except TypeError:\n try:\n _tmparr_lnzc = array.array(\"i\",lnzc)\n 
except TypeError:\n raise TypeError(\"Argument lnzc has wrong type\")\n else:\n lnzc_ = memoryview(_tmparr_lnzc)\n \n else:\n if lnzc_.format != \"i\":\n lnzc_ = memoryview(array.array(\"i\",lnzc))\n \n if lnzc_ is not None and len(lnzc_) != (n_):\n raise ValueError(\"Array argument lnzc has wrong length\")\n if lptrc is None: raise TypeError(\"Invalid type for argument lptrc\")\n if lptrc is None:\n lptrc_ = None\n else:\n try:\n lptrc_ = memoryview(lptrc)\n except TypeError:\n try:\n _tmparr_lptrc = array.array(\"q\",lptrc)\n except TypeError:\n raise TypeError(\"Argument lptrc has wrong type\")\n else:\n lptrc_ = memoryview(_tmparr_lptrc)\n \n else:\n if lptrc_.format != \"q\":\n lptrc_ = memoryview(array.array(\"q\",lptrc))\n \n if lptrc_ is not None and len(lptrc_) != (n_):\n raise ValueError(\"Array argument lptrc has wrong length\")\n lensubnval_ = None\n if lensubnval_ is None:\n lensubnval_ = len(lsubc)\n elif lensubnval_ != len(lsubc):\n raise IndexError(\"Inconsistent length of array lsubc\")\n if lensubnval_ is None:\n lensubnval_ = len(lvalc)\n elif lensubnval_ != len(lvalc):\n raise IndexError(\"Inconsistent length of array lvalc\")\n if lensubnval_ is None: lensubnval_ = 0\n if lsubc is None: raise TypeError(\"Invalid type for argument lsubc\")\n if lsubc is None:\n lsubc_ = None\n else:\n try:\n lsubc_ = memoryview(lsubc)\n except TypeError:\n try:\n _tmparr_lsubc = array.array(\"i\",lsubc)\n except TypeError:\n raise TypeError(\"Argument lsubc has wrong type\")\n else:\n lsubc_ = memoryview(_tmparr_lsubc)\n \n else:\n if lsubc_.format != \"i\":\n lsubc_ = memoryview(array.array(\"i\",lsubc))\n \n if lsubc_ is not None and len(lsubc_) != (lensubnval_):\n raise ValueError(\"Array argument lsubc has wrong length\")\n if lvalc is None: raise TypeError(\"Invalid type for argument lvalc\")\n if lvalc is None:\n lvalc_ = None\n else:\n try:\n lvalc_ = memoryview(lvalc)\n except TypeError:\n try:\n _tmparr_lvalc = array.array(\"d\",lvalc)\n except TypeError:\n raise TypeError(\"Argument lvalc has wrong type\")\n else:\n lvalc_ = memoryview(_tmparr_lvalc)\n \n else:\n if lvalc_.format != \"d\":\n lvalc_ = memoryview(array.array(\"d\",lvalc))\n \n if lvalc_ is not None and len(lvalc_) != (lensubnval_):\n raise ValueError(\"Array argument lvalc has wrong length\")\n if b is None: raise TypeError(\"Invalid type for argument b\")\n _copyback_b = False\n if b is None:\n b_ = None\n else:\n try:\n b_ = memoryview(b)\n except TypeError:\n try:\n _tmparr_b = array.array(\"d\",b)\n except TypeError:\n raise TypeError(\"Argument b has wrong type\")\n else:\n b_ = memoryview(_tmparr_b)\n _copyback_b = True\n else:\n if b_.format != \"d\":\n b_ = memoryview(array.array(\"d\",b))\n _copyback_b = True\n if b_ is not None and len(b_) != (n_):\n raise ValueError(\"Array argument b has wrong length\")\n res = self.__obj.sparsetriangularsolvedense(transposed_,n_,lnzc_,lptrc_,lensubnval_,lsubc_,lvalc_,b_)\n if res != 0:\n raise Error(rescode(res),\"\")\n if _copyback_b:\n b[:] = _tmparr_b", "def fit_all_synthesis(t, l, bounds=(0, np.inf), alpha_0=1, beta_0=1, gamma_0=1):\n l = l.A if issparse(l) else l\n\n tau = np.hstack((0, t))\n x = np.hstack((0, l))\n\n f_lsq = lambda p: sol_u(tau, 0, p[0], p[1]) + sol_s(tau, 0, 0, p[0], p[1], p[2]) - x\n ret = least_squares(f_lsq, np.array([alpha_0, beta_0, gamma_0]), bounds=bounds)\n return ret.x[0], ret.x[1], ret.x[2]", "def calc_lampam(ss, constraints=None):\n if constraints is None:\n return calc_lampam_2(ss)\n\n if isinstance(ss, list):\n lampam = 
np.zeros((len(ss), 12), float)\n for index in range(len(ss)):\n lampam[index] = calc_lampam(ss[index], constraints)\n return lampam\n if ss.ndim == 2 and ss.shape[0] > 1:\n lampam = np.zeros((ss.shape[0], 12), float)\n for index in range(ss.shape[0]):\n lampam[index] = calc_lampam(ss[index], constraints)\n return lampam\n n_plies_in_panels = np.size(ss) # laminate ply count\n\n if not constraints.sym:\n cos_sin = np.empty((4, n_plies_in_panels), float)\n for ind in range(n_plies_in_panels):\n cos_sin[:, ind] = np.copy(constraints.cos_sin[\n constraints.ind_angles_dict[ss[ind]]].reshape((4, )))\n\n for_the_top = np.arange(n_plies_in_panels)\n z_0 = np.ones(n_plies_in_panels)\n z_2 = ((1-n_plies_in_panels/2)*z_0+for_the_top)**3 \\\n - ((1-n_plies_in_panels/2)*z_0+for_the_top - 1)**3\n z_1 = ((1-n_plies_in_panels/2)*z_0+for_the_top)**2 \\\n - ((1-n_plies_in_panels/2)*z_0+for_the_top - 1)**2\n return np.array([\n (1/n_plies_in_panels)*np.matmul(cos_sin, z_0),\n (2/n_plies_in_panels**2)*np.matmul(cos_sin, z_1),\n (4/n_plies_in_panels**3)*np.matmul(cos_sin, z_2)]).reshape(12)\n\n cos_sin = np.empty((4, np.size(ss) // 2), float)\n for ind in range(np.size(ss) // 2):\n cos_sin[:, ind] = constraints.cos_sin[\n constraints.ind_angles_dict[ss[ind]]].reshape((4,))\n\n for_the_top = np.arange(np.size(ss) // 2)\n z_0 = np.ones(np.size(ss) // 2)\n z_2 = ((1 - n_plies_in_panels / 2) * z_0 + for_the_top) ** 3 \\\n - ((1 - n_plies_in_panels / 2) * z_0 + for_the_top - 1) ** 3\n lampam = np.array([\n (2/n_plies_in_panels)*np.matmul(cos_sin, z_0),\n np.array([0, 0, 0, 0]),\n (8/n_plies_in_panels**3)*np.matmul(cos_sin, z_2)]).reshape(12)\n\n if np.size(ss) % 2:\n cos_sin_mid = constraints.cos_sin[\n constraints.ind_angles_dict[ss[n_plies_in_panels // 2]]]\n lampam += np.array([\n (1/n_plies_in_panels)*cos_sin_mid,\n np.zeros((4,), dtype=float),\n (1/n_plies_in_panels**3)*cos_sin_mid]).reshape(12)\n return lampam", "def update_latent(self, variable_mat, weight_mat, output_mat, y_list):\n new_latent = {k: np.zeros(self.H_mat[k].shape) for k in self.H_mat}\n\n new_latent['sigma'] = np.linalg.inv(\n np.diag([self.sigma_h ** -1 for _ in range(self.R)])\n + reduce(lambda x, y: x + y,\n [np.outer(weight_mat['mu'][1:, i],\n weight_mat['mu'][1:, i])\n + weight_mat['sigma'][i][1:, 1:]\n for i in range(self.task_count)])\n )\n\n new_latent['mu'] = np.dot(\n new_latent['sigma'],\n np.dot(variable_mat['mu'].transpose(),\n self.kernel_mat) / self.sigma_h\n + reduce(\n lambda x, y: x + y,\n [np.outer(weight_mat['mu'][1:, i], output_mat['mu'][i, :])\n - np.repeat(a=np.array([\n [x * weight_mat['mu'][0, i] + y for x, y in\n zip(weight_mat['mu'][1:, i],\n weight_mat['sigma'][i, 1:, 0])]]\n ), repeats=self.sample_count, axis=0).transpose()\n for i in range(self.task_count)]\n )\n )\n\n return new_latent", "def magma_sgels(trans, m, n, nrhs, A, lda, B, ldb, hwork, lwork):\n info = c_int_type()\n trans = _trans_conversion[trans]\n status = _libmagma.magma_sgels(trans, m, n, nrhs, int(A), lda,\n int(B), ldb, int(hwork), lwork,\n ctypes.byref(info))\n magmaCheckStatus(status)", "def solveLinearSingular(aMat, bVec, isParameterized=False, defaultValue=1):\n solution = aMat.gauss_jordan_solve(bVec)\n solutionVec = solution[0]\n if not isParameterized:\n parameterMat = solution[1]\n for parameter in parameterMat:\n solutionVec = solutionVec.subs(parameter, defaultValue)\n solutionVec = solutionVec.evalf()\n return solutionVec", "def kl_latent_space(network, *args):\n\n z, log_det_J = network(*args)\n loss = tf.reduce_mean(0.5 * 
tf.square(tf.norm(z, axis=-1)) - log_det_J)\n return loss", "def LotkaVolterra_Dynamics(self):\n LV_c = self.toConceptual(self.state) # (nF, nR)\n LV_c = LV_c.mul((1 - LV_c) + self.LV_inhM.mm(LV_c))\n LV_s = self.toNeural(LV_c)\n\n return LV_c, LV_s", "def calc_linear_dispersion(self):\n self._check_k_columns([\"K0L\", \"K0SL\", \"K1SL\"])\n tw = self.twiss_df\n phs_adv = self.get_phase_adv()\n res = self._results_df\n coeff_fun = self._linear_dispersion_coeff\n sum_fun = self._linear_dispersion_sum\n\n # Calculate\n LOG.debug(\"Calculate Linear Dispersion\")\n with timeit(lambda t: LOG.debug(\" Time needed: {:f}\".format(t))):\n # sources\n k0_mask = tw['K0L'] != 0\n k0s_mask = tw['K0SL'] != 0\n k1s_mask = tw['K1SL'] != 0\n\n mx_mask = k0_mask | k1s_mask # magnets contributing to Dx,j (-> Dy,m)\n my_mask = k0s_mask | k1s_mask # magnets contributing to Dy,j (-> Dx,m)\n\n if not any(mx_mask | my_mask):\n LOG.warning(\" No linear dispersion contributions found. Values will be zero.\")\n res['DX'] = 0.\n res['DY'] = 0.\n self._log_added('DX', 'DY')\n return\n\n # create temporary DataFrame for magnets with coefficients already in place\n df = tfs.TfsDataFrame(index=tw.index).join(\n coeff_fun(tw.loc[:, 'BETX'], tw.Q1)).join(\n coeff_fun(tw.loc[:, 'BETY'], tw.Q2))\n df.columns = ['COEFFX', 'COEFFY']\n\n LOG.debug(\" Calculate uncoupled linear dispersion\")\n df.loc[my_mask, 'DX'] = df.loc[my_mask, 'COEFFX'] * \\\n sum_fun(tw.loc[mx_mask, 'K0L'],\n 0,\n 0,\n tw.loc[mx_mask, 'BETX'],\n tau(phs_adv['X'].loc[mx_mask, my_mask], tw.Q1)\n ).transpose()\n df.loc[mx_mask, 'DY'] = df.loc[mx_mask, 'COEFFY'] * \\\n sum_fun(-tw.loc[my_mask, 'K0SL'], # MINUS!\n 0,\n 0,\n tw.loc[my_mask, 'BETY'],\n tau(phs_adv['Y'].loc[my_mask, mx_mask], tw.Q2)\n ).transpose()\n\n LOG.debug(\" Calculate full linear dispersion values\")\n res.loc[:, 'DX'] = df.loc[:, 'COEFFX'] * \\\n sum_fun(tw.loc[mx_mask, 'K0L'],\n tw.loc[mx_mask, 'K1SL'],\n df.loc[mx_mask, 'DY'],\n tw.loc[mx_mask, 'BETX'],\n tau(phs_adv['X'].loc[mx_mask, :], tw.Q1)\n ).transpose()\n res.loc[:, 'DY'] = df.loc[:, 'COEFFY'] * \\\n sum_fun(-tw.loc[my_mask, 'K0SL'], # MINUS!\n tw.loc[my_mask, 'K1SL'],\n df.loc[my_mask, 'DX'],\n tw.loc[my_mask, 'BETY'],\n tau(phs_adv['Y'].loc[my_mask, :], tw.Q2)\n ).transpose()\n\n LOG.debug(\" Average linear dispersion Dx: {:g}\".format(\n np.mean(res['DX'])))\n LOG.debug(\" Average linear dispersion Dy: {:g}\".format(\n np.mean(res['DY'])))\n self._log_added('DX', 'DY')", "def do_test_latents(self, Y):\r\n assert not self.likelihood.is_heteroscedastic\r\n N_test = Y.shape[0]\r\n input_dim = self.Z.shape[1]\r\n means = np.zeros((N_test, input_dim))\r\n covars = np.zeros((N_test, input_dim))\r\n\r\n dpsi0 = -0.5 * self.input_dim * self.likelihood.precision\r\n dpsi2 = self.dL_dpsi2[0][None, :, :] # TODO: this may change if we ignore het. 
likelihoods\r\n V = self.likelihood.precision * Y\r\n\r\n #compute CPsi1V\r\n if self.Cpsi1V is None:\r\n psi1V = np.dot(self.psi1.T, self.likelihood.V)\r\n tmp, _ = linalg.dtrtrs(self._Lm, np.asfortranarray(psi1V), lower=1, trans=0)\r\n tmp, _ = linalg.dpotrs(self.LB, tmp, lower=1)\r\n self.Cpsi1V, _ = linalg.dtrtrs(self._Lm, tmp, lower=1, trans=1)\r\n\r\n dpsi1 = np.dot(self.Cpsi1V, V.T)\r\n\r\n start = np.zeros(self.input_dim * 2)\r\n\r\n for n, dpsi1_n in enumerate(dpsi1.T[:, :, None]):\r\n args = (self.kern, self.Z, dpsi0, dpsi1_n.T, dpsi2)\r\n xopt, fopt, neval, status = SCG(f=latent_cost, gradf=latent_grad, x=start, optargs=args, display=False)\r\n\r\n mu, log_S = xopt.reshape(2, 1, -1)\r\n means[n] = mu[0].copy()\r\n covars[n] = np.exp(log_S[0]).copy()\r\n\r\n return means, covars", "def ObjectiveGLASSO_block(As, bs, Ts, lam, groups, x):\n \n obj = 0\n for j in range(len(As)):\n obj = obj + 0.5*Norm(As[j].dot(x[Ts[j]])-bs[j])**2\n\n obj = obj + lam*np.sum([Norm(x[g]) for g in groups])\n return obj", "def update(self, dLds, alpha, beta):\n T = len(self.x)\n self.nodes.reset_error()\n self.igate.reset_error()\n self.fgate.reset_error()\n self.ogate.reset_error()\n dLdx = np.zeros((T, self.input_size))\n dLdc = np.zeros(self.hidden_size)\n for t in xrange(T-1, -1, -1):\n dLdpo = dLds[t] * self.h[t] * self.gatefun.derivate(self.o[t])\n # parameters for output gate\n self.ogate.dLdu += np.outer(dLdpo, self.x[t])\n self.ogate.dLdw += np.outer(dLdpo, self.s[t-1])\n self.ogate.dLdv += np.outer(dLdpo, self.c[t-1])\n dLds[t-1] += np.dot(self.ogate.w.T, dLdpo)\n dLdx[t] += np.dot(self.ogate.u.T, dLdpo)\n dLdc += np.dot(self.ogate.v.T, dLdpo)\n\n dLdc += dLds[t] * self.o[t] * self.acfun.derivate(self.h[t])\n dLdpi = dLdc * self.g[t] * self.gatefun.derivate(self.i[t])\n dLdpf = dLdc * self.c[t-1] * self.gatefun.derivate(self.f[t])\n dLdpg = dLdc * self.i[t] * self.acfun.derivate(self.g[t])\n dLdc = dLdc * self.f[t]\n # parameters for nodes in hidden layer\n self.nodes.dLdu += np.outer(dLdpg, self.x[t])\n self.nodes.dLdw += np.outer(dLdpg, self.s[t-1])\n dLds[t-1] += np.dot(self.nodes.w.T, dLdpg)\n dLdx[t] += np.dot(self.nodes.u.T, dLdpg)\n # parameters for input gate\n self.igate.dLdu += np.outer(dLdpi, self.x[t])\n self.igate.dLdw += np.outer(dLdpi, self.s[t-1])\n self.igate.dLdv += np.outer(dLdpi, self.c[t-1])\n dLds[t-1] += np.dot(self.igate.w.T, dLdpi)\n dLdx[t] += np.dot(self.igate.u.T, dLdpi)\n dLdc += np.dot(self.igate.v.T, dLdpi)\n # parameters for forget gate\n self.fgate.dLdu += np.outer(dLdpf, self.x[t])\n self.fgate.dLdw += np.outer(dLdpf, self.s[t-1])\n self.fgate.dLdv += np.outer(dLdpf, self.c[t-1])\n dLds[t-1] += np.dot(self.fgate.w.T, dLdpf)\n dLdx[t] += np.dot(self.fgate.u.T, dLdpf)\n dLdc += np.dot(self.fgate.v.T, dLdpf)\n if self.en_bias:\n self.nodes.dLdb += dLdpg\n self.igate.dLdb += dLdpi\n self.fgate.dLdb += dLdpf\n self.ogate.dLdb += dLdpo\n # update weight matrix of current hidden node\n self.nodes.update(alpha, beta)\n self.igate.update(alpha, beta)\n self.fgate.update(alpha, beta)\n self.ogate.update(alpha, beta)\n return dLdx", "def pdmse_spgl1(N, theta=None, **kwargs):\n s = kwargs.get('s', 1)\n eta = kwargs.get('eta', 1)\n spgParms = kwargs.get('spgParms', [])\n x = np.zeros(N)\n x[-s:] = N\n z = np.random.randn(N)\n if theta is None:\n theta = np.sqrt(N)\n elif theta is 'sqNormZ':\n theta = np.linalg.norm(z)\n elif isinstance(theta, str):\n raise ValueError('theta must be numeric or equal to \\'sqNormZ\\'.')\n y = x + eta*z\n xstar = spgl1(np.eye(N), y, 
sigma=theta, options=spgParms)[0]\n return np.linalg.norm(x - xstar)**2", "def experiment_linear_conv_ls(_):\n # Min dft1-norm solution found (norm=1.9895)\n adv_norm_type = 'dftinf'\n dual_norm_type = 'dft1'\n attack_step_dir = 'dftinf_sd' # 'dftinf'\n\n module_name = 'train'\n # log_dir = 'runs_linear_conv_ls_%s' % adv_norm_type\n log_dir = 'runs_linear_conv_ls_normfix_%s' % adv_norm_type\n exclude = '*'\n\n d_over_n = [1, 2, 4, 8, 16, 32] # separable >= 1\n dim = 100\n num_train = [int(dim / p) for p in d_over_n]\n\n # Config params\n shared_params = []\n shared_params += [\n ('config', './config.py'),\n ('seed', list(range(3))),\n ]\n\n # Data hyper-parameters\n shared_params += [\n ('temperature', 0.0001),\n ('num_test', 1), # 500\n ('dim', dim),\n ('num_train', num_train),\n ]\n\n # Adversarial configuration: test\n shared_params += nameit('adv', [\n ('norm_type', adv_norm_type),\n # ('lr', 0.1),\n ('niters', 1), # 10\n # ('eps_iter', attack_eps), # Overwritten by cvxpy\n # ('eps_tot', attack_eps), # Overwritten by cvxpy\n ('pre_normalize', True), # multi attacks\n ('post_normalize', True),\n ('eps_from_cvxpy', True),\n ('step_dir', attack_step_dir),\n ])\n\n # Logging to standard output\n shared_params += [\n ('log_interval', 10000), # 1000),\n ('log_keys', '\\'(\"%s\")\\'' % ('\",\"'.join([\n 'risk/train/zero_one',\n 'risk/train/adv/%s' % adv_norm_type,\n 'weight/linear/norm/%s' % dual_norm_type,\n 'margin/%s' % dual_norm_type,\n ]))),\n # Compare with cvxpy\n ('enable_cvxpy', True),\n ]\n\n # Model hyper-parameters\n conv_linear_params = nameit('model', [\n ('arch', 'conv_linear'),\n ('nlayers', 2),\n ('regularizer', 'none'),\n ])\n\n params = []\n\n # GD line search implicit bias\n gd_ls = nameit('optim', [\n ('name', 'gd_ls'),\n ('niters', 100000),\n ('bound_step', True),\n ])\n params += [OrderedDict(shared_params+conv_linear_params+gd_ls)]\n\n return params, log_dir, module_name, exclude", "def SF_ML(jd,mag,errmag,x0=[0.5, 0.5],bnds=((0.0, 3.0), (0.0,3.0))):\n\n dtarray, dmagarray, sigmaarray = SFarray(jd,mag,errmag)\n ndt=np.where((dtarray<=365))\n dtarray=dtarray[ndt]\n dmagarray=dmagarray[ndt]\n sigmaarray=sigmaarray[ndt]\n\n\n x0 = [0.5, 0.5]\n bnds = ((0.0, 3.0), (0.0,3.0))\n\n #res = sp.optimize.minimize(neg_lnlike, x0, args=(dtarray, dmagarray, sigmaarray),\n # method='L-BFGS-B', bounds=bnds, options={'ftol': 1e-15, 'gtol': 1e-10, 'eps': 1e-08, 'maxfun': 150000, 'maxiter': 150000, 'maxls': 40})\n\n res = sp.optimize.minimize(neg_lnlike, x0, args=(dtarray, dmagarray, sigmaarray),\n method='Nelder-Mead', bounds=bnds, options={'fatol': 1e-10, 'xatol': 1e-10, 'maxiter': 15000})\n\n g_min = res.x[0]\n a_min = res.x[1]\n\n return(g_min, a_min)", "def ais_latent_network_given_A(x0, graph_model, graph_sampler, N_samples=1000, B=100,\n steps_per_B=11):\n import pdb; pdb.set_trace()\n betas = np.linspace(0,1,B)\n\n # Sample m points\n log_weights = np.zeros(N_samples)\n for m in range(N_samples):\n # Sample a new set of graph parameters from the prior\n x = copy.deepcopy(x0)\n\n # print \"M: %d\" % m\n # Sample mus from each of the intermediate distributions,\n # starting with a draw from the prior.\n samples = []\n\n # Ratios correspond to the 'f_{n-1}(x_{n-1})/f_{n}(x_{n-1})' values in Neal's paper\n ratios = np.zeros(B-1)\n\n # Sample the intermediate distributions\n for (n,beta) in zip(range(1,B), betas[1:]):\n # print \"M: %d\\tBeta: %.3f\" % (m,beta)\n sys.stdout.write(\"M: %d\\tBeta: %.3f \\r\" % (m,beta))\n sys.stdout.flush()\n # Set the likelihood scale (beta) in the 
graph model\n graph_model.lkhd_scale.set_value(beta)\n\n # Take 100 steps per beta\n for s in range(steps_per_B):\n x = graph_sampler.update(x)\n\n # Compute the ratio of this sample under this distribution and the previous distribution\n curr_lkhd = seval(graph_model.log_p,\n graph_model.get_variables(),\n x['net']['graph'])\n\n graph_model.lkhd_scale.set_value(betas[n-1])\n prev_lkhd = seval(graph_model.log_p,\n graph_model.get_variables(),\n x['net']['graph'])\n\n ratios[n-1] = curr_lkhd - prev_lkhd\n\n # Compute the log weight of this sample\n log_weights[m] = np.sum(ratios)\n\n print \"\"\n print \"W: %f\" % log_weights[m]\n\n # Compute the mean of the weights to get an estimate of the normalization constant\n log_Z = -np.log(N_samples) + logsumexp(log_weights)\n return log_Z", "def _solve(matrix, result):\n\n # Get valid indices\n idx = np.nonzero(result)[0]\n\n # Init solution with NaNs.\n sln = np.ones(result.shape[-1]) * np.nan\n\n # Only solve for valid indices, i.e. wavelengths that are\n # covered by the pixels on the detector.\n # It will be a singular matrix otherwise.\n sln[idx] = spsolve(matrix[idx, :][:, idx], result[idx])\n\n return sln", "def get_d_moomtl(grads):\r\n \r\n nobj, dim = grads.shape\r\n \r\n# # use cvxopt to solve QP\r\n# P = np.dot(grads , grads.T)\r\n# \r\n# q = np.zeros(nobj)\r\n# \r\n# G = - np.eye(nobj)\r\n# h = np.zeros(nobj)\r\n# \r\n# \r\n# A = np.ones(nobj).reshape(1,2)\r\n# b = np.ones(1)\r\n# \r\n# cvxopt.solvers.options['show_progress'] = False\r\n# sol = cvxopt_solve_qp(P, q, G, h, A, b)\r\n \r\n # use MinNormSolver to solve QP\r\n sol, nd = MinNormSolver.find_min_norm_element(grads)\r\n \r\n return sol", "def g_solving_subproblem_of_ALR(self,vehicle_id):\r\n global_LB = -10000\r\n global_UB = 10000\r\n iteration_for_RSP = 20\r\n optimal_solution_for_RSP = None\r\n self.multiplier_v = 0.5\r\n\r\n # solve the expected shortest path problem\r\n self.g_dynamic_programming_algorithm(vehicle_id, 3)\r\n\r\n # obtain the variance\r\n y_ =self.g_ending_state_vector[vehicle_id].VSStateVector[0].Primal_Label_cost_variance\r\n\r\n for k in range(iteration_for_RSP):\r\n # print(k)\r\n LB = 0\r\n # step 2: solve decomposed dual problems\r\n # Part I: subproblem of x\r\n self.g_dynamic_programming_algorithm(vehicle_id, 1)\r\n LB += self.g_ending_state_vector[vehicle_id].VSStateVector[0].Label_cost_for_searching\r\n\r\n # Part II: subproblem of y\r\n obj_of_y_ = self.reliability * (y_) ** 0.5 - self.multiplier_v * y_\r\n if obj_of_y_ > 0:\r\n y = 0\r\n LB += 0\r\n else:\r\n y = y_\r\n LB += obj_of_y_\r\n\r\n # generate an upper bound\r\n variance = self.g_ending_state_vector[vehicle_id].VSStateVector[0].Primal_Label_cost_variance\r\n Label_cost_for_lagrangian_mean = self.g_ending_state_vector[vehicle_id].VSStateVector[0].Label_cost_for_searching_mean\r\n UB = Label_cost_for_lagrangian_mean + self.reliability * (variance) ** 0.5\r\n\r\n # print(\"UB:{}\".format(UB))\r\n # print(\"LB:{}\".format(LB))\r\n\r\n # UB and LB update\r\n if LB > global_LB:\r\n global_LB = LB\r\n\r\n if UB < global_UB:\r\n global_UB = UB\r\n optimal_solution_for_RSP = self.g_ending_state_vector[vehicle_id].VSStateVector[0]\r\n\r\n # step 3: update multipliers\r\n if variance- y != 0:\r\n self.multiplier_v+= (global_UB - LB) / (variance-y)\r\n # if self.multiplier_v<0:\r\n # self.multiplier_v=1\r\n # print(self.multiplier_v)\r\n\r\n # step 4: termination condition test\r\n if global_UB != 0:\r\n gap = abs((global_UB - global_LB) / global_UB)\r\n # print(gap)\r\n if gap < 0.02:\r\n 
print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP, global_LB\r\n else:\r\n if global_UB - global_LB == 0:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP, global_LB\r\n\r\n if k == iteration_for_RSP - 1:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP, global_LB", "def sparse_options(default_solver='spsolve',\n default_least_squares_solver='least_squares_lsmr' if HAVE_SCIPY_LSMR else 'least_squares_generic_lsmr',\n bicgstab_tol=1e-15,\n bicgstab_maxiter=None,\n spilu_drop_tol=1e-4,\n spilu_fill_factor=10,\n spilu_drop_rule='basic,area',\n spilu_permc_spec='COLAMD',\n spsolve_permc_spec='COLAMD',\n spsolve_keep_factorization=True,\n lgmres_tol=1e-5,\n lgmres_maxiter=1000,\n lgmres_inner_m=39,\n lgmres_outer_k=3,\n least_squares_lsmr_damp=0.0,\n least_squares_lsmr_atol=1e-6,\n least_squares_lsmr_btol=1e-6,\n least_squares_lsmr_conlim=1e8,\n least_squares_lsmr_maxiter=None,\n least_squares_lsmr_show=False,\n least_squares_lsqr_damp=0.0,\n least_squares_lsqr_atol=1e-6,\n least_squares_lsqr_btol=1e-6,\n least_squares_lsqr_conlim=1e8,\n least_squares_lsqr_iter_lim=None,\n least_squares_lsqr_show=False,\n pyamg_tol=1e-5,\n pyamg_maxiter=400,\n pyamg_verb=False,\n pyamg_rs_strength=('classical', {'theta': 0.25}),\n pyamg_rs_CF='RS',\n pyamg_rs_presmoother=('gauss_seidel', {'sweep': 'symmetric'}),\n pyamg_rs_postsmoother=('gauss_seidel', {'sweep': 'symmetric'}),\n pyamg_rs_max_levels=10,\n pyamg_rs_max_coarse=500,\n pyamg_rs_coarse_solver='pinv2',\n pyamg_rs_cycle='V',\n pyamg_rs_accel=None,\n pyamg_rs_tol=1e-5,\n pyamg_rs_maxiter=100,\n pyamg_sa_symmetry='hermitian',\n pyamg_sa_strength='symmetric',\n pyamg_sa_aggregate='standard',\n pyamg_sa_smooth=('jacobi', {'omega': 4.0/3.0}),\n pyamg_sa_presmoother=('block_gauss_seidel', {'sweep': 'symmetric'}),\n pyamg_sa_postsmoother=('block_gauss_seidel', {'sweep': 'symmetric'}),\n pyamg_sa_improve_candidates=[('block_gauss_seidel', {'sweep': 'symmetric', 'iterations': 4}), None],\n pyamg_sa_max_levels=10,\n pyamg_sa_max_coarse=500,\n pyamg_sa_diagonal_dominance=False,\n pyamg_sa_coarse_solver='pinv2',\n pyamg_sa_cycle='V',\n pyamg_sa_accel=None,\n pyamg_sa_tol=1e-5,\n pyamg_sa_maxiter=100):\n\n assert default_least_squares_solver.startswith('least_squares')\n\n opts = (('bicgstab_spilu', {'type': 'bicgstab_spilu',\n 'tol': bicgstab_tol,\n 'maxiter': bicgstab_maxiter,\n 'spilu_drop_tol': spilu_drop_tol,\n 'spilu_fill_factor': spilu_fill_factor,\n 'spilu_drop_rule': spilu_drop_rule,\n 'spilu_permc_spec': spilu_permc_spec}),\n ('bicgstab', {'type': 'bicgstab',\n 'tol': bicgstab_tol,\n 'maxiter': bicgstab_maxiter}),\n ('spsolve', {'type': 'spsolve',\n 'permc_spec': spsolve_permc_spec,\n 'keep_factorization': spsolve_keep_factorization}),\n ('lgmres', {'type': 'lgmres',\n 'tol': lgmres_tol,\n 'maxiter': lgmres_maxiter,\n 'inner_m': lgmres_inner_m,\n 'outer_k': lgmres_outer_k}),\n ('least_squares_lsqr', {'type': 'least_squares_lsqr',\n 'damp': least_squares_lsqr_damp,\n 'atol': least_squares_lsqr_atol,\n 'btol': least_squares_lsqr_btol,\n 'conlim': least_squares_lsqr_conlim,\n 'iter_lim': least_squares_lsqr_iter_lim,\n 'show': least_squares_lsqr_show}))\n\n if HAVE_SCIPY_LSMR:\n opts += (('least_squares_lsmr', {'type': 'least_squares_lsmr',\n 'damp': least_squares_lsmr_damp,\n 'atol': 
least_squares_lsmr_atol,\n 'btol': least_squares_lsmr_btol,\n 'conlim': least_squares_lsmr_conlim,\n 'maxiter': least_squares_lsmr_maxiter,\n 'show': least_squares_lsmr_show}),)\n\n if HAVE_PYAMG:\n opts += (('pyamg', {'type': 'pyamg',\n 'tol': pyamg_tol,\n 'maxiter': pyamg_maxiter}),\n ('pyamg-rs', {'type': 'pyamg-rs',\n 'strength': pyamg_rs_strength,\n 'CF': pyamg_rs_CF,\n 'presmoother': pyamg_rs_presmoother,\n 'postsmoother': pyamg_rs_postsmoother,\n 'max_levels': pyamg_rs_max_levels,\n 'max_coarse': pyamg_rs_max_coarse,\n 'coarse_solver': pyamg_rs_coarse_solver,\n 'cycle': pyamg_rs_cycle,\n 'accel': pyamg_rs_accel,\n 'tol': pyamg_rs_tol,\n 'maxiter': pyamg_rs_maxiter}),\n ('pyamg-sa', {'type': 'pyamg-sa',\n 'symmetry': pyamg_sa_symmetry,\n 'strength': pyamg_sa_strength,\n 'aggregate': pyamg_sa_aggregate,\n 'smooth': pyamg_sa_smooth,\n 'presmoother': pyamg_sa_presmoother,\n 'postsmoother': pyamg_sa_postsmoother,\n 'improve_candidates': pyamg_sa_improve_candidates,\n 'max_levels': pyamg_sa_max_levels,\n 'max_coarse': pyamg_sa_max_coarse,\n 'diagonal_dominance': pyamg_sa_diagonal_dominance,\n 'coarse_solver': pyamg_sa_coarse_solver,\n 'cycle': pyamg_sa_cycle,\n 'accel': pyamg_sa_accel,\n 'tol': pyamg_sa_tol,\n 'maxiter': pyamg_sa_maxiter}))\n opts = OrderedDict(opts)\n opts.update(genericsolvers.options())\n def_opt = opts.pop(default_solver)\n if default_least_squares_solver != default_solver:\n def_ls_opt = opts.pop(default_least_squares_solver)\n ordered_opts = OrderedDict(((default_solver, def_opt),\n (default_least_squares_solver, def_ls_opt)))\n else:\n ordered_opts = OrderedDict(((default_solver, def_opt),))\n ordered_opts.update(opts)\n return ordered_opts", "def singular_lsfit(flist, x, y, dy):\n # Initialization\n n = len(x)\n m = len(flist)\n A = np.zeros((n, m), dtype='float64')\n b = np.zeros(n, dtype='float64')\n c = np.zeros(m, dtype='float64')\n dc = np.zeros(m, dtype='float64')\n Rinv = np.zeros((m, m), dtype='float64')\n\n # Fill A and c\n for i in range(n):\n # Weight data by error\n b[i] = y[i] / dy[i]\n\n for j in range(m):\n A[i, j] = flist[j](x[i]) / dy[i]\n\n # Decompose using singular value decomposition\n U, e, V = singular_decomp(A)\n c = singular_solve(U, e, V, b)\n\n # Calculate the covariance matrix S\n VDinv = np.zeros((m, m), dtype='float64')\n for i in range(m):\n dinv_i = 1 / (e[i] * e[i])\n for i in range(m):\n VDinv[j, i] = V[j, i] * dinv_i\n S = np.dot(VDinv, np.transpose(V))\n\n # Calculate the uncertainties on the coefficients from S\n for i in range(m):\n dc[i] = np.sqrt(S[i, i])\n\n return c, dc", "def ggpl_stair_landings(dx,dy,dz):\n dati=rizerAndTread(dy,dz)\n nGradini = dati[2]\n alzata=dati[1]\n pedata=dati[0]\n yPianerottolo = dy/3.0\n lGradino = dx/2\n pianerottolo=CUBOID([dx,yPianerottolo,alzata])\n halfSteps=0\n dispari=0\n if nGradini%2==0:\n halfSteps=nGradini/2\n else:\n halfSteps=(nGradini-1)/2\n dispari=1\n scala=[]\n diagonale=[]\n dist=[-(dx/2),dx/2]\n dist2=[dx/2]\n distDiag=QUOTE(dist)\n scala.append(T([1])(lGradino))\n a=0\n p=pedata\n diagonale.append([a,p])\n diagonale.append([alzata,p])\n diagonale.append([alzata,2*p])\n d=MKPOL([diagonale,[[1,2,3]],None])\n d=PROD([distDiag,d])\n d=STRUCT([R([2,3])(PI/2),d])\n d=STRUCT([R([1,2])(PI),d])\n d=STRUCT([T([1])(dx*3/2),d])\n for i in range (1,int(halfSteps)):\n p=p+pedata\n a=a+alzata\n scala.append(CUBOID([lGradino,pedata,alzata]))\n scala.append(T([1,2,3])([0,pedata,alzata]))\n d=STRUCT([d,T([2,3])([pedata,alzata]),d])\n \n scala.append(CUBOID([lGradino,pedata,alzata]))\n 
scala.append(T([1,2,3])([-lGradino,pedata,alzata]))\n scala.append(pianerottolo)\n \n scala1=STRUCT([STRUCT(scala),d])\n \n if dispari:\n halfSteps2=halfSteps+1\n\n scalaMirror=[]\n diagonaleMirror=[]\n dist=[-(dx/2),dx/2]\n dist2=[dx/2]\n distDiag=QUOTE(dist)\n scalaMirror.append(T([1])(lGradino))\n a=0\n p=pedata\n diagonaleMirror.append([a,p])\n diagonaleMirror.append([alzata,p])\n diagonaleMirror.append([alzata,2*p])\n d2=MKPOL([diagonale,[[1,2,3]],None])\n d2=PROD([distDiag,d2])\n d2=STRUCT([R([2,3])(PI/2),d2])\n d2=STRUCT([R([1,2])(PI),d2])\n d2=STRUCT([T([1,2,3])([dx*3/2,-pedata,-alzata]),d2])\n \n for i in range (1,int(halfSteps)):\n d2=STRUCT([d2,T([2,3])([pedata,alzata]),d2])\n p=p+pedata\n a=a+alzata\n scalaMirror.append(CUBOID([lGradino,pedata,alzata]))\n scalaMirror.append(T([1,2,3])([0,pedata,alzata]))\n\n p=p+pedata\n a=a+alzata\n \n scalaMirror.append(CUBOID([lGradino,pedata,alzata]))\n \n \n scala2=STRUCT([STRUCT(scalaMirror),d2])\n scala2=STRUCT([R([1,2])(PI),scala2])\n scala2=STRUCT([T([1,2,3])([lGradino*2,pedata*(halfSteps),alzata*(halfSteps+1)]),scala2])\n \n scala2=STRUCT([scala1,scala2])\n a=SIZE([1,2,3])(BOX([1,2,3])(scala2))\n \n sx=dx/a[0] \n sy=dy/a[1]\n sz=dz/a[2]\n\n scala2=STRUCT([COLOR(color(255,255,255)),S([1,2,3])([sx,sy,sz]),scala2])\n return scala2", "def analytic_dLdp(q,ps,C1s,C0s,ks,bs,sigma=1):\n n_p=len(ps)\n r=np.linalg.norm(ps-q,axis=1).reshape(-1,1)\n r_hat=(ps-q)/r\n t_hat=np.zeros(r_hat.shape)\n t_hat[:,0]=-r_hat[:,1]\n t_hat[:,1]=r_hat[:,0]\n\n dLdeta=np.zeros(n_p).reshape(-1,1)\n dLdr=np.zeros(n_p).reshape(-1,1)\n\n\n for i in range(n_p):\n Keta=2*(ks[i]*bs[i])**2/(sigma**2) * (r[i]-C1s[i])**(2*bs[i]-2)\n Kr=2*(ks[i]*bs[i])**2/(sigma**2) * (bs[i]-1) * (r[i]-C1s[i])**(2*bs[i]-3)\n sum_eta=sum_kr=0\n for j in range(n_p):\n \n rkrj=np.max([np.min([r_hat[i,:].dot(r_hat[j,:]),1]),-1])\n \n direction=np.sign(np.linalg.det(r_hat[[j,i],:]))\n\n sum_eta += (ks[j]*bs[j])**2 * (r[j]-C1s[j])**(2*bs[j]-2) * rkrj * np.sqrt(1-rkrj**2) * direction\n sum_kr += (ks[j]*bs[j])**2 * (r[j]-C1s[j])**(2*bs[j]-2) * (1-rkrj**2)\n \n dLdeta[i]=Keta*sum_eta\n dLdr[i]=Kr*sum_kr\n \n dLdp = dLdr * r_hat + (dLdeta/r) * t_hat\n \n \n return dLdp", "def _update_latent_resp(data, smm_dof, posterior_nws_scale,\n posterior_nws_dof, log_smm_mixweight,\n log_det_precision, scatter):\n num_features = data.shape[1]\n\n latent_resp = (gammaln((num_features + smm_dof) / 2) - \n gammaln(smm_dof / 2) - \n (num_features / 2) * np.log(smm_dof * pi) + \n log_smm_mixweight + log_det_precision / 2 - \n ((num_features + smm_dof) / 2) * \n np.log(1 + \n (posterior_nws_dof / smm_dof).T * scatter.T + \n (num_features / \n (smm_dof * posterior_nws_scale)).T))\n\n latent_resp = normalize_logspace(latent_resp)\n return latent_resp", "def smooth_input(xs, ys, L):\n n = len(xs)\n\n # obj = [1 for i in range(n)]\n # for i in range(2 * n):\n # obj.append(0)\n\n # Create the model\n model = LpProblem(name=\"small-problem\", sense=LpMinimize)\n ws = [LpVariable(name=\"w_{}\".format(i), lowBound=0, upBound=1) for i in range(n)]\n ls = [LpVariable(name=\"L_{}\".format(i), lowBound=0) for i in range(n)]\n zs = [LpVariable(name=\"z_{}\".format(i)) for i in range(n)]\n\n # objective\n model += lpSum(ws)\n\n # constraint 1:\n # sum of Li <= L\n model += (lpSum(ls) <= L * n, \"sum of Li <= L\")\n\n # Constraint 2:\n # w_i >= |z_i - y_i|\n for i in range(n):\n model += (ws[i] + zs[i] >= ys[i], \"C2.a_{}\".format(i))\n model += (ws[i] - zs[i] >= -ys[i], \"C2.b_{}\".format(i))\n\n # Constraint 3\n # |z_i - z_j| <= 
L_i * dist(x_i, x_j)\n for i in range(n):\n for j in range(n):\n if i != j:\n model += (zs[i] - zs[j] - abs(xs[i] - xs[j]) * ls[i] <= 0, \"C3.a_{}_{}\".format(i, j))\n model += (zs[j] - zs[i] - abs(xs[i] - xs[j]) * ls[i] <= 0, \"C3.b_{}_{}\".format(i, j))\n\n if model.solve() == 1:\n print(\n \"------------------------------------\\nFound solution for the linear program\\n------------------------------------\\n\")\n return [[xs[i], zs[i].value()] for i in range(n)]\n # return [zi.value() for zi in zs], [li.value() for li in ls]\n\n print(\"Linear program: no solution found\")\n exit(1)\n return -1", "def fit_1d_solution(p, loc, ll, iteration=0):\n\n func_name = __NAME__ + '.fit_1d_solution()'\n # get 1d solution\n loc = fit_1d_ll_solution(p, loc, ll, iteration)\n # invert solution\n loc = invert_1ds_ll_solution(p, loc, ll, iteration)\n # get the total number of orders to fit\n num_orders = len(loc['ALL_LINES_{0}'.format(iteration)])\n # get the dimensions of the data\n ydim, xdim = loc['HCDATA'].shape\n # get inv_params\n inv_params = loc['LL_PARAM_{0}'.format(iteration)]\n # set pixel shift to zero, as doesn't apply here\n pixel_shift_inter = 0\n pixel_shift_slope = 0\n # get new line list\n ll_out = spirouMath.get_ll_from_coefficients(pixel_shift_inter,\n pixel_shift_slope,\n inv_params, xdim, num_orders)\n # get the first derivative of the line list\n dll_out = spirouMath.get_dll_from_coefficients(inv_params, xdim, num_orders)\n # find the central pixel value\n centpix = ll_out.shape[1]//2\n # get the mean pixel scale (in km/s/pixel) of the central pixel\n norm = dll_out[:, centpix]/ll_out[:, centpix]\n meanpixscale = speed_of_light * np.nansum(norm)/len(ll_out[:, centpix])\n # get the total number of lines used\n total_lines = int(np.nansum(loc['X_ITER_2'][:, 2]))\n # add to loc\n loc['LL_OUT_{0}'.format(iteration)] = ll_out\n loc.set_source('LL_OUT_{0}'.format(iteration), func_name)\n loc['DLL_OUT_{0}'.format(iteration)] = dll_out\n loc.set_source('DLL_OUT_{0}'.format(iteration), func_name)\n loc['TOTAL_LINES_{0}'.format(iteration)] = total_lines\n loc.set_source('TOTAL_LINES_{0}'.format(iteration), func_name)\n # log message\n wmsg = 'On fiber {0} mean pixel scale at center: {1:.4f} [km/s/pixel]'\n WLOG(p, 'info', wmsg.format(p['FIBER'], meanpixscale))\n # return loc\n return loc", "def propagation_matrix(self,L,dt):\n neq=np.size(L,0)\n nstage=len(self)\n I =np.identity(nstage)\n I2=np.identity(neq)\n Z=np.kron(I,dt*L)\n X=np.kron(I,I2)-np.dot(np.kron(self.A,I2),Z)\n Xinv=np.linalg.inv(X)\n e=np.kron(np.ones(nstage)[:,np.newaxis],I2)\n G=I2 + np.dot(np.kron(self.b[:,np.newaxis],I2).T,\n np.dot(Z,np.dot(Xinv,e)))\n\n return G", "def system(L_x, W, L_sc_top, L_sc_bot, w, z_x, z_y, a, periodic, leads=False, transverse_SOI=True):\n # If the system is periodic shorten the length by one lattice constant\n if periodic:\n L_x = L_x - a\n\n template_strings = get_template_strings(transverse_SOI)\n templates = {k: kwant.continuum.discretize(v, coords=('x', 'y'), grid=a)\n for k, v in template_strings.items()}\n shapes = get_zigzag_shape(L_x, W, L_sc_top, L_sc_bot, w, z_x, z_y, a)\n\n if periodic:\n syst = kwant.Builder(kwant.TranslationalSymmetry([L_x + a, 0]))\n else:\n syst = kwant.Builder()\n\n if w == 0:\n normal_sites = syst.fill(templates['normal'], *shapes['normal'])\n\n else:\n norm_top_sites = syst.fill(templates['normal'], *shapes['normal_top'])\n norm_bot_sites = syst.fill(templates['normal'], *shapes['normal_bot'])\n sc_mid_sites = syst.fill(templates['sc_mid'], 
*shapes['sc_mid'])\n\n if L_sc_top > 0:\n sc_top_sites = syst.fill(templates['sc_top'], *shapes['sc_top'])\n\n if L_sc_bot > 0:\n sc_bot_sites = syst.fill(templates['sc_bot'], *shapes['sc_bot'])\n\n if periodic:\n syst = kwant.wraparound.wraparound(syst)\n\n if leads:\n if z_x != 0 and L_x % z_x != 0:\n raise NotImplementedError(\n 'Horizontal leads for L_x not and integer multiple of z_x are not implemented.', z_x, L_x)\n\n ph = np.kron(sigma_y, sigma_y)\n c_law = np.kron(sigma_0, sigma_z)\n\n lead_left = kwant.Builder(kwant.TranslationalSymmetry(\n [-a, 0]), conservation_law=c_law, particle_hole=ph)\n lead_right = kwant.Builder(kwant.TranslationalSymmetry(\n [a, 0]), conservation_law=c_law, particle_hole=ph)\n\n # Can't use lead.reversed() because the system might not be reflection\n # invariant if it has a zigzag shape\n for lead in [lead_left, lead_right]:\n lead_idx = 0 if lead == lead_left else -1\n x_lead = 0 if lead == lead_left else L_x\n\n lead_shape = shapes['normal_bot'][0] + shapes['normal_top'][0] + \\\n shapes['sc_mid'][0] if w != 0 else shapes['normal'][0]\n lead_shape = (lead_shape[lead_idx:, ::], (x_lead, 0))\n lead.fill(templates['normal'], *lead_shape)\n syst.attach_lead(lead)\n\n return syst.finalized()", "def lyap_dense_solver_options():\n\n return {'pymess_glyap': {'type': 'pymess_glyap'}}", "def build_linear_system(u, dt, dx, D = 3, P = 3,time_diff = 'poly',space_diff = 'poly',lam_t = None,lam_x = None, width_x = None,width_t = None, deg_x = 5,deg_t = None,sigma = 2):\n\n n, m = u.shape\n\n if width_x == None: width_x = n/10\n if width_t == None: width_t = m/10\n if deg_t == None: deg_t = deg_x\n\n # If we're using polynomials to take derviatives, then we toss the data around the edges.\n if time_diff == 'poly': \n m2 = m-2*width_t\n offset_t = width_t\n else: \n m2 = m\n offset_t = 0\n if space_diff == 'poly': \n n2 = n-2*width_x\n offset_x = width_x\n else: \n n2 = n\n offset_x = 0\n\n if lam_t == None: lam_t = 1.0/m\n if lam_x == None: lam_x = 1.0/n\n\n ########################\n # First take the time derivaitve for the left hand side of the equation\n ########################\n ut = np.zeros((n2,m2), dtype=u.dtype)\n\n if time_diff == 'FDconv':\n Usmooth = np.zeros((n,m), dtype=u.dtype)\n # Smooth across x cross-sections\n for j in range(m):\n Usmooth[:,j] = ConvSmoother(u[:,j],width_t,sigma)\n # Now take finite differences\n for i in range(n2):\n ut[i,:] = FiniteDiff(Usmooth[i + offset_x,:],dt,1)\n\n elif time_diff == 'poly':\n T= np.linspace(0,(m-1)*dt,m)\n for i in range(n2):\n ut[i,:] = PolyDiff(u[i+offset_x,:],T,diff=1,width=width_t,deg=deg_t)[:,0]\n\n elif time_diff == 'Tik':\n for i in range(n2):\n ut[i,:] = TikhonovDiff(u[i + offset_x,:], dt, lam_t)\n\n else:\n for i in range(n2):\n ut[i,:] = FiniteDiff(u[i + offset_x,:],dt,1)\n \n ut = np.reshape(ut, (n2*m2,1), order='F')\n\n ########################\n # Now form the rhs one column at a time, and record what each one is\n ########################\n\n u2 = u[offset_x:n-offset_x,offset_t:m-offset_t]\n Theta = np.zeros((n2*m2, (D+1)*(P+1)), dtype=u.dtype)\n ux = np.zeros((n2,m2), dtype=u.dtype)\n rhs_description = ['' for i in range((D+1)*(P+1))]\n\n if space_diff == 'poly': \n Du = {}\n for i in range(m2):\n Du[i] = PolyDiff(u[:,i+offset_t],np.linspace(0,(n-1)*dx,n),diff=D,width=width_x,deg=deg_x)\n if space_diff == 'Fourier': ik = 2*np.pi*1j*np.fft.fftfreq(n, d = dx)\n \n for d in range(D+1):\n\n if d > 0:\n for i in range(m2):\n if space_diff == 'Tik': ux[:,i] = TikhonovDiff(u[:,i+offset_t], dx, lam_x, 
d=d)\n elif space_diff == 'FDconv':\n Usmooth = ConvSmoother(u[:,i+offset_t],width_x,sigma)\n ux[:,i] = FiniteDiff(Usmooth,dx,d)\n elif space_diff == 'FD': ux[:,i] = FiniteDiff(u[:,i+offset_t],dx,d)\n elif space_diff == 'poly': ux[:,i] = Du[i][:,d-1]\n elif space_diff == 'Fourier': ux[:,i] = np.fft.ifft(ik**d*np.fft.fft(u[:,i]))\n else: ux = np.ones((n2,m2), dtype=u.dtype) \n \n for p in range(P+1):\n Theta[:, d*(P+1)+p] = np.reshape(np.multiply(ux, np.power(u2,p)), (n2*m2), order='F')\n\n if p == 1: rhs_description[d*(P+1)+p] = rhs_description[d*(P+1)+p]+'u'\n elif p>1: rhs_description[d*(P+1)+p] = rhs_description[d*(P+1)+p]+'u^' + str(p)\n if d > 0: rhs_description[d*(P+1)+p] = rhs_description[d*(P+1)+p]+\\\n 'u_{' + ''.join(['x' for _ in range(d)]) + '}'\n\n return ut, Theta, rhs_description", "def constrained_ls_tf(H, gam):\r\n # Create a laplacian transfer function with size given by H.\r\n P, Q = H.shape\r\n L = laplacian_tf(P=P, Q=Q)\r\n # Calculate constrained least squares transfer function.\r\n C = np.conjugate(H) / ((np.conjugate(H) * H) + gam * (np.conjugate(L) * L))\r\n return C", "def lnlike(theta, lsf, tell_sp):\n\n\t# If theta is entered as a list, make it into a dictionary\n\ttheta_keys = [key for key in ap.init.keys()]\n\tif type(theta) == np.ndarray:\n\t\ttheta = dict(zip(theta_keys, theta))\n\n\t# Choose the appropriate Spectrum class to read the data\n\tif ap.data['instrument'] == 'APOGEE':\n\t\tdata = ap.Apogee(id=ap.data['ID'], type=ap.data[\"dtype\"], visit=ap.data['visit'])\n\telse:\n\t\tprint('No Spectrum class to read data for instrument', ap.data['instrument'])\n\n\tchisq = ap.returnModelFit(data, theta, lsf=lsf, telluric=tell_sp)\n\n\tprint('\\n chisq', chisq, '\\n')\n\n\treturn -0.5 * chisq", "def solver_lasso(X, y, alpha=None, max_iter=3000, tol=1e-4, positive=False):\n\n n_tasks, n_samples, n_features = X.shape\n theta = np.zeros((n_features, n_tasks))\n\n if alpha is None:\n alpha = np.ones(n_tasks)\n alpha = np.asarray(alpha).reshape(n_tasks)\n for k in range(n_tasks):\n lasso = Lasso(alpha=alpha[k], tol=tol, max_iter=max_iter,\n fit_intercept=False, positive=positive)\n lasso.fit(X[k], y[k])\n theta[:, k] = lasso.coef_\n\n return theta", "def DM_NE2001(self, source, distance, smweight='uniform'):\n\n assert smweight.lower() in ['uniform','tau','theta','iso']\n\n if not isinstance(distance, astropy.units.quantity.Quantity):\n # assume kpc\n distance=distance*u.kpc \n if (len(distance.shape)>0 and distance.value.any() <= 0) or (len(distance.shape)==0 and distance.value < 0):\n raise ValueError('distance must be > 0')\n\n if not isinstance(source, astropy.coordinates.sky_coordinate.SkyCoord):\n if isinstance(source,str):\n # assume .par file\n source=parfile2SkyCoord(source)\n else:\n raise TypeError('Do not know how to interpret an object of type %s' % source.__class__)\n source=source.galactic\n\n\n if len(source.l.shape)==0:\n results=ne2001.dmdsm(self.datadir,\n np.radians(source.l.value),\n np.radians(source.b.value),\n -1,\n 0,\n distance.to(u.kpc).value)\n sign=1\n if results[2]=='>':\n #raise ValueError('DM returned a lower limit')\n sign=-1\n if smweight.lower() == 'uniform':\n SM=results[3]*u.kpc/u.m**(20./3)\n elif smweight.lower() == 'tau':\n SM=results[4]*u.kpc/u.m**(20./3)\n elif smweight.lower() == 'theta':\n SM=results[5]*u.kpc/u.m**(20./3)\n elif smweight.lower() == 'iso':\n SM=results[6]*u.kpc/u.m**(20./3)\n\n return sign*results[0]*u.pc/u.cm**3,SM\n else:\n dm=np.zeros_like(source.l.value)\n SM=np.zeros_like(source.l.value)\n it = 
np.nditer(source.l, flags=['multi_index'])\n if len(dm.shape)==0:\n dm_touse=dm\n else:\n dm_touse=dm[it.multi_index]\n while not it.finished:\n if len(distance.shape)==0:\n d_touse=distance\n else:\n d_touse=distance[it.multi_index]\n results=ne2001.dmdsm(self.datadir,\n np.radians(source[it.multi_index].l.value),\n np.radians(source[it.multi_index].b.value),\n -1,\n 0,\n d_touse.to(u.kpc).value)\n sign=1\n if results[2]=='>':\n #raise ValueError('DM returned a lower limit')\n sign=-1\n dm[it.multi_index]=results[0]*sign\n if smweight.lower() == 'uniform':\n SM[it.multi_index]=results[3]\n elif smweight.lower() == 'tau':\n SM[it.multi_index]=results[4]\n elif smweight.lower() == 'theta':\n SM[it.multi_index]=results[5]\n elif smweight.lower() == 'iso':\n SM[it.multi_index]=results[6]\n\n it.iternext()\n return dm*u.pc/u.cm**3,SM*u.kpc/u.m**(20./3)", "def encode_next_latent_state(self, data, latent_state, dts, odeint_rtol=None, odeint_atol=None, method=None):\n N = data.size(0)\n ts, inv_indices = torch.unique(dts, return_inverse=True)\n if ts[-1] == 0:\n return latent_state\n if ts[0] != 0:\n ts = torch.cat([torch.zeros(1, dtype=torch.float, device=self.device), ts])\n inv_indices += 1\n aug_latent_state = self.aug_layer(torch.cat((data, latent_state), dim=-1))\n traj_latent_state = self.diffeq_solver(aug_latent_state, ts, odeint_rtol, odeint_atol, method)\n selected_indices = tuple([torch.arange(N, dtype=torch.long, device=self.device), inv_indices])\n new_latent_state = traj_latent_state[selected_indices] # [N, D_latent]\n assert new_latent_state.size(0) == N\n assert new_latent_state.size(1) == self.latent_dim\n return new_latent_state", "def TrainGroupLasso(As, bs, groups, num_lambdas = 50, normalize=2):\n\n np.random.seed(0) # for consistancy\n\n m = len(As)\n n,D = As[0].shape\n\n # Normalize\n if normalize != 0:\n\n # get norm of each column\n candidate_norms = np.zeros(D)\n for i in range(D):\n candidate_norms[i] = Norm(np.vstack(A[:,i] for A in As), normalize)\n\n norm_bs = [m*Norm(b, normalize) for b in bs]\n\n # normalize \n for i in range(m):\n As[i] = As[i].dot(np.diag(candidate_norms**-1))\n bs[i] = bs[i]/norm_bs[i]\n\n # parameters for ADMM\n rho = 1e-3\n alpha = 1.5\n\n # Get array of lambdas to check\n # Looking at KKT conditions for group lasso, lambda higher than lambda_max will result in x=0\n # lambda_min is set arbitrailly to 1e-5 but if the optimal lambda turns out to be 0 or 1e-5, then one\n # could change this to check lower values\n lambda_max = np.max([np.sum([Norm(A[:,g].T.dot(b)) for (A,b) in zip(As,bs)]) for g in range(D)])\n lambda_min = 1e-5*lambda_max\n Lam = [0]+[np.exp(alpha) for alpha in np.linspace(np.log(lambda_min), np.log(lambda_max), num_lambdas)][:-1]\n\n # Test each value of lambda to find the best\n X = []\n Losses = []\n Histories = []\n\n for lam in Lam:\n x,history = GroupLassoADMM(As,bs,lam,groups,rho,alpha)\n X.append(x.reshape(D,m, order = 'F'))\n Losses.append(PDE_FIND_Loss(As,bs,x))\n Histories.append(history)\n\n if normalize != 0:\n for x in X:\n for i in range(D):\n for j in range(m):\n x[i,j] = x[i,j]/candidate_norms[i]*norm_bs[j]\n for i in range(m):\n As[i] = As[i].dot(np.diag(candidate_norms))\n bs[i] = bs[i]*norm_bs[i]\n\n return X,Lam,Losses,Histories", "def _solve_linear_system(self):\n\n # Solve the linear system\n centered_list_y, mean = self._center_data(self.list_y)\n y = np.linalg.solve(self.cov_matrix, centered_list_y)\n # Assert the resolution of the linear system went well\n assert np.allclose(np.array(centered_list_y), 
self.cov_matrix @ y)\n\n return y, mean", "def LM(f):\n return dmp_ground_LM(f.rep, f.lev, f.dom)", "def _holt_add_dam(x, xi, p, y, l, b, s, m, n, max_seen):\n alpha, beta, phi, alphac, betac, y_alpha = _holt_init(x, xi, p, y, l, b)\n if alpha == 0.0:\n return max_seen\n if beta > alpha:\n return max_seen\n for i in range(1, n):\n l[i] = (y_alpha[i - 1]) + (alphac * (l[i - 1] + phi * b[i - 1]))\n b[i] = (beta * (l[i] - l[i - 1])) + (betac * phi * b[i - 1])\n return sqeuclidean(l + phi * b, y)", "def _l1m_objective(a,X,*args):\n \n return(np.sum(np.apply_along_axis(_euclidnorm,1,_diffmat_objective(a,X))))", "def update_latent(self):\n self.scenario.update_latent()", "def incompatibility_solve_cg(self, useAMS=True):\n \n zero = Expression((\"0.0\", \"0.0\", \"0.0\"), degree=1)\n bc = DirichletBC(self.PN, zero, DirichletBoundary())\n \n T1 = Function(self.PN) # Solution for the curl curl problem\n T2 = Function(self.PN) # Solution for the curl curl problem\n T3 = Function(self.PN) # Solution for the curl curl problem\n\n if useAMS:\n \n # Set operator for the linear solver\n L_X = inner(self.strain_diff_1, curl(self.inc_v0))*dx\n A_X, b_X = assemble_system(self.a_X, L_X, bc)\n self.ksp_X.setOperators(as_backend_type(A_X).mat())\n self.ksp_X.solve(as_backend_type(b_X).vec(), as_backend_type(T1.vector()).vec())\n\n # Show linear solver details\n self.ksp_X.view()\n\n # Solve 2nd system\n L_X = inner(self.strain_diff_2, curl(self.inc_v0))*dx\n A_X, b_X = assemble_system(self.a_X, L_X, bc)\n self.ksp_X.setOperators(as_backend_type(A_X).mat())\n self.ksp_X.solve(as_backend_type(b_X).vec(), as_backend_type(T2.vector()).vec())\n\n # Solve 3nd system\n L_X = inner(self.strain_diff_3, curl(self.inc_v0))*dx\n A_X, b_X= assemble_system(self.a_X, L_X, bc)\n self.ksp_X.setOperators(as_backend_type(A_X).mat())\n self.ksp_X.solve(as_backend_type(b_X).vec(), as_backend_type(T3.vector()).vec())\n \n else:\n\n ### vanilla CG works with potential as RHS\n\n L_X = inner(self.strain_diff_1, curl(self.inc_v0))*dx\n solve(self.a_X == L_X, T1, bc, \n solver_parameters={'linear_solver': 'cg', 'preconditioner': 'jacobi'}) \n\n L_X = inner(self.strain_diff_2, curl(self.inc_v0))*dx\n solve(self.a_X == L_X, T2, bc, \n solver_parameters={'linear_solver': 'cg', 'preconditioner': 'jacobi'}) \n\n L_X = inner(self.strain_diff_3, curl(self.inc_v0))*dx\n solve(self.a_X == L_X, T3, bc, \n solver_parameters={'linear_solver': 'cg', 'preconditioner': 'jacobi'})\n\n return project( self.X_0(curl(T1),curl(T2),curl(T3)), \n self.TFS, solver_type=\"cg\", preconditioner_type=\"ilu\")", "def sorm(func, dist_list, init_search_point, alg): \n def SLSQP(func, dist_list, init_search_point):\n \n dim = len(dist_list)\n current_beta = 0\n new_beta = 1\n sig = np.empty((1, dim))\n mu = np.empty((1, dim))\n new_search_point = np.array(init_search_point).reshape((1, dim))\n \n def f_l(x_l):\n return(func([x_l[i,:]*sig[0,i] + mu[0,i] for i in range(0, dim)]))\n \n while abs(current_beta-new_beta) > 0.001:\n current_search_point = new_search_point\n current_beta = new_beta\n for i in range(0, dim):\n if dist_list[i][1] != 'norm':\n mu[0,i], sig[0, i] = Rosenblatt_Transform(dist_list[i][0], current_search_point[0,i])\n else:\n mu[0,i], sig[0, i] = dist_list[i][0].mean(), dist_list[i][0].std()\n \n dist_fun = lambda u: np.linalg.norm(u) \n \n alg = 'SLSQP'\n \n H = lambda u: f_l(u)\n cons = ({'type': 'eq', 'fun': lambda u: -(H(u.reshape(-1,1)))})\n \n result = scipy.optimize.minimize(dist_fun, x0 = current_search_point, constraints = cons, 
method=alg)\n \n new_beta = result.fun\n u = np.array(result.x).reshape((1,dim))\n \n new_search_point = np.empty((1, dim))\n for i in range(0, dim):\n new_search_point[0,i] = mu[0,i] + u[0,i]*sig[0,i]\n \n beta_value = new_beta \n p_f = sst.norm.cdf(-beta_value)\n iterations = result.nit\n u = result.x\n x = u[:]*sig[0,:] + mu[0,:]\n grad_val = scipy.optimize.approx_fprime(x, func, 0.00000001)\n grad_val = grad_val.reshape((1, dim))\n \n sum1 = np.sum((grad_val[0,:]**2)*(sig[0,:]**2))\n cosines = np.empty((1, dim))\n \n for i in range(0, dim):\n cosines[0,i] = grad_val[0,i]*sig[0,i]/np.sqrt(sum1) \n \n return(beta_value, p_f, x, u, mu, sig, cosines, iterations) \n \n def HL_R(func, dist_list, init_search_point):\n \n iterations = 0\n cur_beta = 3\n new_beta = 0\n dim = len(dist_list)\n global_mean_arr = np.empty((1, dim))\n global_std_arr = np.empty((1, dim))\n new_search_point = np.array(init_search_point).reshape((1, dim))\n \n while abs(cur_beta - new_beta) > 0.001:\n cur_beta = new_beta\n cur_cosines = np.zeros((1, dim))\n new_cosines = np.ones((1, dim))\n \n while max((abs(cur_cosines - new_cosines))[0]) > 0.005:\n \n cur_cosines = new_cosines\n \n cur_search_point = new_search_point\n \n for i in range(0, dim):\n if dist_list[i][1] != 'norm':\n global_mean_arr[0, i], global_std_arr[0, i] = Rosenblatt_Transform(dist_list[i][0], cur_search_point[0,i])\n else:\n global_mean_arr[0, i], global_std_arr[0, i] = dist_list[i][0].mean(), dist_list[i][0].std()\n \n \n grad_val = scipy.optimize.approx_fprime(cur_search_point[0], func, 0.00000001)\n grad_val = grad_val.reshape((1, dim))\n \n sum1 = np.sum((grad_val[0,:]**2)*(global_std_arr[0,:]**2))\n cosines = np.empty((1, dim))\n \n for i in range(0, dim):\n cosines[0,i] = grad_val[0,i]*global_std_arr[0,i]/np.sqrt(sum1)\n \n new_cosines = cosines\n new_search_point = np.empty((1, dim))\n for i in range(0, dim):\n new_search_point[0,i] = global_mean_arr[0,i] - new_cosines[0,i]*global_std_arr[0,i]*cur_beta\n \n iterations = iterations + 1\n \n \n B = Symbol('B')\n coordinates = []\n for i in range(0, dim):\n coordinates.append(global_mean_arr[0, i] - new_cosines[0,i]*global_std_arr[0, i]*B)\n new_beta = float(solve(func(coordinates), B)[0])\n \n cosines = new_cosines \n beta_value = new_beta\n p_f = sst.norm.cdf(-new_beta)\n x = new_search_point\n u = (x[0,:] - global_mean_arr[0,:])/global_std_arr\n \n return(beta_value, p_f, x, u, global_mean_arr, global_std_arr, cosines, iterations)\n \n def HL_RF(func, dist_list, init_search_point):\n\n cur_beta = 3\n new_beta = 0\n dim = len(dist_list)\n\n new_search_point = np.array(init_search_point).reshape((1, dim))\n iterations = 0\n while abs(cur_beta - new_beta) > 0.001 and abs(func(new_search_point[0])) > 0.001:\n global_mean_arr = np.empty((1, dim))\n global_std_arr = np.empty((1, dim))\n cur_beta = new_beta\n cur_search_point = new_search_point\n \n for i in range(0, dim):\n if dist_list[i][1] != 'norm':\n global_mean_arr[0,i], global_std_arr[0, i] = Rosenblatt_Transform(dist_list[i][0], cur_search_point[0,i])\n else:\n global_mean_arr[0,i], global_std_arr[0, i] = dist_list[i][0].mean(), dist_list[i][0].std()\n \n f_val = func(cur_search_point[0])\n \n x_ast = np.empty((1, dim))\n for i in range(0, dim):\n x_ast[0,i] =(cur_search_point[0,i] - global_mean_arr[0,i])/global_std_arr[0,i]\n\n grad_val = scipy.optimize.approx_fprime(cur_search_point[0], func, 0.000001)\n grad_val = grad_val.reshape((1, dim)) \n \n grad_val_ast = np.empty(grad_val.shape)\n for i in range(0, dim):\n grad_val_ast[0,i] = 
grad_val[0,i]*global_std_arr[0,i]\n \n t1 = 1/np.sum(grad_val_ast[0,:]**2)\n\n t2 = sum(grad_val_ast[0,:]*x_ast[0,:]) - f_val\n \n t3 = t1*t2\n \n new_x_ast = np.empty(x_ast.shape)\n for i in range(0, dim):\n new_x_ast[0,i] = t3*grad_val_ast[0,i]\n u = new_x_ast\n new_beta = np.linalg.norm(new_x_ast)\n \n new_search_point = np.empty((1, dim))\n for i in range(0, dim):\n new_search_point[0,i] = new_x_ast[0,i]*global_std_arr[0,i] + global_mean_arr[0,i]\n iterations = iterations + 1\n \n grad_val_ast_sum = sum(grad_val_ast[0,:]**2)\n cosines = grad_val_ast/(grad_val_ast_sum**0.5)\n beta_value = new_beta\n x = new_search_point\n p_f = sst.norm.cdf(-beta_value)\n \n return(beta_value, p_f, x, u, global_mean_arr, global_std_arr, cosines, iterations)\n \n if alg == 'slsqp':\n beta_value, p_f, x, u, mu, sig, cosines, iterations = SLSQP(func, dist_list, init_search_point)\n elif alg == 'HL-R':\n beta_value, p_f, x, u, mu, sig, cosines, iterations = HL_R(func, dist_list, init_search_point)\n elif alg == 'HL-RF':\n beta_value, p_f, x, u, mu, sig, cosines, iterations = HL_RF(func, dist_list, init_search_point)\n \n d = len(dist_list)\n\n R0 = np.eye(d)\n \n for i in range(0, d):\n R0[-1,i] = cosines[0,i]\n \n Q, R = scipy.linalg.rq(R0)\n \n def f_l(x_l):\n return(func([x_l[i]*sig[0,i] + mu[0,i] for i in range(0, d)]))\n \n x = np.array(x).reshape((1, -1))\n u = x[0,:]*sig[0,:] + mu[0,:]\n \n H = nd.Hessian(f_l)(u)\n \n grad_val_standard = (scipy.optimize.approx_fprime(x[0], func, 0.00000001)[:])*(sig[0,:])\n \n dist_standard = np.linalg.norm(grad_val_standard)\n \n A_1 = 1/dist_standard\n R_transp = np.transpose(R)\n A_2 = R.dot(H)\n A_3 = A_2.dot(R_transp)\n \n A = A_3.dot(A_1)\n \n A = A[0:-1, 0:-1]\n \n k = np.linalg.eig(A)[0]\n \n prod_arr = np.empty((1, len(k)))\n for i in range(0, len(k)):\n prod_arr[0,i] = (1 + beta_value*k[i])**-0.5\n \n p_f_sorm = p_f*np.prod(prod_arr)\n beta_sorm = -1*scipy.stats.norm.ppf(p_f_sorm)\n \n print('-------------------------')\n print('Second-Order Reliability Analysis')\n print('Algorithm:',alg,'solver')\n print('Iterations: {}\\nReliability index = {}\\nProbability of failure = {}'.format(iterations, beta_sorm, p_f_sorm))\n print('-------------------------')\n \n return(beta_sorm, p_f_sorm)", "def solve_inc(self, DU, DF, calcG=True):\n\n nu = len(self.udofs)\n np = len(self.pdofs)\n ndof = len(self.dofs)\n decompose = False\n if calcG: decompose = True\n scheme = self.scheme\n\n if calcG:\n if self.verbose and nu>500: print \" building system...\", ; sys.stdout.flush()\n self.mountG()\n\n # Mount G11.. 
G22 matrices\n cG = self.G.tocsc()\n self.G11 = cG[:nu , :nu ]\n self.G12 = cG[:nu , nu:]\n self.G21 = cG[ nu:, :nu ]\n self.G22 = cG[ nu:, nu:]\n cG = None # Free memory\n\n # Pick last values for disp, vel and accel\n U_0 = self.U.copy()\n Uv_0 = self.Uv.copy()\n Ua_0 = self.Ua.copy()\n\n # Mount RHS\n self.RHS = self.DF - dot(self.C, Uv_0 + (1.0-gamma)*h*Ua_0) - dot(self.K, U_0 + h*Uv_0 + (0.5-beta)*(h**2.0)*Ua_0) \n\n RHS1 = RHS[:nu]\n Ua2 = DU[nu:]\n\n # Solve linear system\n RHS2 = self.G22*Ua2 #sparse matrix * dense vector\n if nu:\n if self.verbose and nu>500: print \"solving...\", ; sys.stdout.flush()\n if scheme == \"MNR\" and decompose : self.LUsolver = factorized(self.G11)\n if scheme == \"NR\" or scheme == \"FE\": self.LUsolver = factorized(self.G11)\n U1 = scipy.sparse.linalg.spsolve(self.G11, RHS1 - self.G12*Ua2)\n RHS2 += self.G21*Ua1\n\n # updating disp, vel and accel\n self.Uv = Uv_0 + (1.0-gamma)*h*Ua_0 + gamma*h*self.Ua\n self.U = U_0 + h*Uv_0 + (0.5-beta)*(h**2.0)*Ua_0 + (h**2.0)*beta*self.Ua\n \n # calculating reactions\n self.DF = dot(self.M,self.Ua) + dot(self.C,self.Uv) + dot(self.K,self.U)\n for i in range(nu):\n self.F[self.udofs[i].eq_id] = F_bk[self.udofs[i].eq_id]\n\n # Complete vectors\n for i, dof in enumerate(self.udofs): DU[dof.eq_id] = U1[i]\n for i, dof in enumerate(self.pdofs): DF[dof.eq_id] = F2[i]\n\n if self.verbose and nu>500: print \"updating...\" ; sys.stdout.flush()\n DFint = self.update_elems_and_nodes(DU) # Also calculates DFint\n #if self.verbose: print \" done.\"\n\n R = DF - DFint\n return DFint, R", "def ObjectiveADMM(As, bs, Ts, lam, groups, x, z):\n \n obj = 0\n for j in range(len(As)):\n obj = obj + 0.5*Norm(As[j].dot(x[Ts[j]])-bs[j])**2\n\n obj = obj + lam*np.sum([Norm(z[g]) for g in groups])\n return obj", "def next_step(self, Dxy_a):\n num_x_axes = len(self.x_axes)\n num_y_axes = len(self.y_axes)\n set_x = set(range(num_x_axes))\n set_y = set(range(num_x_axes, num_x_axes + num_y_axes, 1))\n\n log_Dxy_a = []\n log_Dx_Dy_a = []\n entang_a = []\n for alp in range(self.num_hidden_states):\n Dxy_alp = Dxy_a[alp]\n log_Dxy_alp = self.log(Dxy_alp)\n log_Dxy_a.append(log_Dxy_alp)\n\n Dx_alp = Dxy_alp.get_partial_tr(set_y)\n Dy_alp = Dxy_alp.get_partial_tr(set_x)\n log_Dx_Dy_alp = self.log(DenMat.get_kron_prod_of_den_mats(\n [Dx_alp, Dy_alp]))\n log_Dx_Dy_a.append(log_Dx_Dy_alp)\n\n entang_alp = (Dxy_alp*log_Dxy_alp -\n Dxy_alp*log_Dx_Dy_alp).trace()/2\n if alp == -1:\n print(\"llll-norm\", np.linalg.norm(Dxy_alp.arr),\n Dxy_alp.trace())\n print('hhhhhhh', np.linalg.norm((log_Dxy_alp -\n log_Dx_Dy_alp).arr))\n Dx_Dy_alp = DenMat.get_kron_prod_of_den_mats([Dx_alp, Dy_alp])\n print('vvvvvvvvv', np.linalg.norm(Dxy_alp.arr -\n Dx_Dy_alp.arr))\n print('ccccccDx', Dx_alp)\n print('ccccccDy', Dy_alp)\n print('ccccccDxy', Dxy_alp)\n print('ccccccDxDy', Dx_Dy_alp)\n print((\"bbbnnnmmm\", (Dxy_alp*log_Dxy_alp).trace()/2,\n -(Dxy_alp*log_Dx_Dy_alp).trace()/2))\n entang_a.append(entang_alp)\n alp_min, entang = min(enumerate(entang_a), key=itemgetter(1))\n if self.verbose:\n print('entang_a=', entang_a)\n print('alp_min=', alp_min)\n lam_xy = log_Dxy_a[alp_min] - log_Dx_Dy_a[alp_min]\n\n # this loop will fill list new_Dxy_a\n new_Dxy_a = []\n for alp in range(self.num_hidden_states):\n if alp == alp_min:\n new_Dxy_alp = self.Dxy\n else:\n new_Dxy_alp = self.exp(lam_xy + log_Dx_Dy_a[alp])\n new_Dxy_alp *= (1/new_Dxy_alp.trace())\n new_Dxy_a.append(new_Dxy_alp)\n # print('xxxxxxrrrrr 0=', np.linalg.norm(\n # new_Dxy_a[alp_min].arr - self.Dxy.arr))\n return 
entang, new_Dxy_a", "def get_sgd_solution(TRAINING_PHI, TEST_PHI, VAL_PHI, W_Now, TrainingData,\n TrainingTarget, TestData, ValData):\n # Gradient Descent Solution for Linear Regression\n La = 2\n # learningRate = 0.01\n L_Erms_Val, L_Erms_TR, L_Erms_Test, L_Accuracy_Test, W_Mat = [], [], [], [], []\n\n for i in range(0, 400):\n\n # print (f'---------Iteration: {i} M{M} LR {learningRate} L :{C_Lambda}--------------')\n Delta_E_D = -np.dot(\n (TrainingTarget[i] - np.dot(np.transpose(W_Now), TRAINING_PHI[i])),\n TRAINING_PHI[i])\n La_Delta_E_W = np.dot(La, W_Now)\n Delta_E = np.add(Delta_E_D, La_Delta_E_W)\n Delta_W = -np.dot(learningRate, Delta_E)\n W_T_Next = W_Now + Delta_W\n W_Now = W_T_Next\n\n #-----------------TrainingData Accuracy---------------------#\n TR_TEST_OUT = GetValTest(TRAINING_PHI, W_T_Next)\n Erms_TR = GetErms(TR_TEST_OUT, TrainingTarget)\n L_Erms_TR.append(float(Erms_TR.split(',')[1]))\n\n #-----------------ValidationData Accuracy---------------------#\n VAL_TEST_OUT = GetValTest(VAL_PHI, W_T_Next)\n Erms_Val = GetErms(VAL_TEST_OUT, ValDataAct)\n L_Erms_Val.append(float(Erms_Val.split(',')[1]))\n\n #-----------------TestingData Accuracy---------------------#\n TEST_OUT = GetValTest(TEST_PHI, W_T_Next)\n Erms_Test = GetErms(TEST_OUT, TestDataAct)\n L_Erms_Test.append(float(Erms_Test.split(',')[1]))\n L_Accuracy_Test.append(float(Erms_Test.split(',')[0]))\n\n return ([L_Erms_TR, L_Erms_Val, L_Erms_Test, L_Accuracy_Test])", "def optimizeBasis(self, legs, alpha=1., init=0., tol=1e-6, verbose=False):\n from scipy.optimize import minimize\n from sloth.utils import renyi_entropy\n\n def entanglement(theta):\n from sloth.utils import flatten_svals\n A = self.rotate(theta[0], legs)\n S = A.svd(leg=A.internallegs[0], compute_uv=False)\n entropy = renyi_entropy(S, alpha)\n if verbose:\n print(f\"S(theta={theta}) = {entropy}\")\n normsv = np.linalg.norm(flatten_svals(S))\n if not np.isclose(normsv, 1.):\n print(f\"Error on singular values of {abs(1 - normsv)}\")\n return entropy\n\n res = minimize(entanglement, np.array(init), tol=tol,\n bounds=[(-1.6, 1.6)])\n return res", "def ADMM_soft(d, mtx, kernel, l, batch=None, p=1E-6, niters=100, tol=1E-4, gt=None, init=None):\n \n # Pad input\n d, crop = __pad(d)\n dims = d.shape[:2]\n \n # Get sampling mask\n M = (d !=0)\n \n # Initialise \n if init is None:\n init = d\n else:\n init, _ = __pad(init)\n \n if batch is None:\n batch = sub_block(1)\n \n if gt is not None:\n err = np.zeros(niters)\n else:\n err = []\n \n x = init\n z = mtx.fwd(x, kernel)\n u = 0*z\n \n # Get normalisation factor\n N = mtx.norm(dims, kernel)\n \n # Precompute LHS\n Q = 1/(M + (p/2)*N)\n Q[np.isinf(Q)] = 0\n \n L = mtx.size(dims, kernel)[0]\n \n # ADMM iterations\n for i in range(niters):\n \n # x-update\n xx = Q*(d + (p/2)*mtx.adj(z - u, dims, kernel))\n \n # z-update\n H = mtx.fwd(xx, kernel)\n\n for idx in batch(dims, kernel, mtx):\n S,V = utils.half_SVD(H[idx,:] + u[idx,:])\n# print(S)\n# print(S.shape)\n# print(np.maximum(S-l/p,0))\n# print(np.maximum(S-l/p,0).shape)\n S=np.diag(S)\n U=(H[idx,:] + u[idx,:])@V@np.linalg.pinv(S)\n S=np.maximum(S-l/p,0)\n z[idx,:] = U@S@np.conj(V.T)\n \n # u-update\n u = u + H - z\n \n #penalty adjustment\n s = p*mtx.fwd(xx-x, kernel)\n if np.linalg.norm(z-H) > 10*np.linalg.norm(s):\n p = p*2\n u = u/2\n elif np.linalg.norm(s) >10*np.linalg.norm(z-H):\n p = p/2\n u = u*2\n \n# print(p)\n \n \n # Check relative update tolerance\n update = np.linalg.norm(xx.ravel()-x.ravel())/np.linalg.norm(x.ravel())\n if update < tol and i > 
0:\n print(f'Min Update Tolerance Reached at {i} iterations')\n break\n \n \n # If ground truth available, print RMSE\n if gt is not None:\n err[i] = np.linalg.norm(xx[:crop[0],:crop[1]].ravel()-gt.ravel())/np.linalg.norm(gt.ravel())\n if np.mod(i,100) == 0:\n print(f'Iter: {i}, P: {p}, RMSE: {err[i]}')\n \n # Save estimate\n x = xx \n\n if gt is not None:\n return __unpad(x, crop), err\n else:\n return __unpad(x, crop)", "def _fit_apgl(x, mask, lmbd,\n max_iter=100, L=1e-3, beta=0.5,\n tol=1e-3, print_loss=False):\n # init\n n1, n2 = x.shape\n rdm = RandomState(123)\n theta = rdm.randn(n1, n2) # natural parameter\n thetaOld = theta\n alpha = 1\n alphaOld = 0\n\n # main loop\n loss = _cross_entropy(x, mask, theta) + lmbd * \\\n np.linalg.norm(theta, ord='nuc')\n iteration = []\n for i in range(int(max_iter)):\n if print_loss:\n print(f'Epoch {i}, loss {loss:.3f}')\n iteration.append(loss)\n lossOld = loss\n # nesterov extropolation\n A = theta + (alphaOld - 1) / alpha * (theta - thetaOld)\n for _ in range(50):\n S = A - L * _gradient(x, mask, A)\n thetaNew = svt(S, lmbd * L)\n ce = _cross_entropy(x, mask, thetaNew)\n if ce < _bound(x, mask, thetaNew, theta, L):\n break\n else:\n L = beta * L\n thetaOld = theta\n theta = thetaNew\n alphaOld = alpha\n alpha = (1 + np.sqrt(4 + alpha ** 2)) / 2\n loss = ce + lmbd * np.linalg.norm(theta, ord='nuc')\n if i == max_iter - 1:\n print(f'Reach max iteration {i+1}')\n if np.abs(lossOld - loss) < tol:\n break\n\n return theta, np.array(iteration)", "def rwgraph_analyze2(input=(None)):\r\n\r\n\r\n #set up graph and degree distribution arrays\r\n n=2000\r\n m=4\r\n G=nx.barabasi_albert_graph(n, m, seed=5)\r\n Nt=100\r\n M=20000\r\n maxdeg=0\r\n degree_dist=[]\r\n for i in range(0,n):\r\n degree_dist.append(G.degree[i])\r\n if G.degree[i]>maxdeg:\r\n maxdeg=G.degree[i]\r\n j=i\r\n\r\n #set inital conditions and D\r\n y0=np.zeros(n,dtype=int)\r\n y0[j]=200\r\n D=1\r\n #define time for odi Int\r\n t=np.arange(Nt+1,dtype=int)\r\n #set up operators\r\n A = nx.adjacency_matrix(G)\r\n Q = A.toarray().sum(axis=1)\r\n L=np.diag(Q)-A.toarray()\r\n Q_inv=1/Q\r\n Ls=np.diag(np.ones(n))-np.matmul(np.diag(Q_inv),A.toarray())\r\n Ls_tran=np.transpose(Ls)\r\n\r\n #convert to sparse operators and include diffusion\r\n L_spar = scipy.sparse.csr_matrix(-D*L)\r\n Ls_spar = scipy.sparse.csr_matrix(-D*Ls)\r\n Ls_tran_spar = scipy.sparse.csr_matrix(-D*Ls_tran)\r\n A=nx.adjacency_matrix(G)\r\n L=-D*(scipy.sparse.diags(degree_arr)-A)\r\n Ls=-D*(scipy.sparse.diags(np.ones(N))-scipy.sparse.diags(1/degree_arr).dot(A))\r\n\r\n #define operators\r\n def Lap(y,t):\r\n return scipy.sparse.csr_matrix.__mul__(L_spar,y)\r\n def Lap_Ls(y,t):\r\n return scipy.sparse.csr_matrix.__mul__(Ls_spar,y)\r\n def Lap_Ls_tran(y,t):\r\n return scipy.sparse.csr_matrix.__mul__(Ls_tran_spar,y)\r\n\r\n #solutions of different operators\r\n solL=scipy.integrate.odeint(Lap,y0,t)\r\n solLs=scipy.integrate.odeint(Lap_Ls,y0,t)\r\n solLs_tran=scipy.integrate.odeint(Lap_Ls_tran,y0,t)\r\n\r\n\r\n #finds eigen values and vectors and puts them into order\r\n def eigen(L):\r\n eigen_values,eigen_vectors=scipy.linalg.eig(-L)\r\n idx = eigen_values.argsort()[::-1]\r\n eigen_values = eigen_values[idx]\r\n eigen_vectors = eigen_vectors[:,idx]\r\n return eigen_values,eigen_vectors\r\n\r\n #finds all eigen values and eigen vectors of the different operators. 
can use sparse matrics\r\n eigen_values_LS,eigen_vectors_LS=eigen(Ls)\r\n eigen_values_LS_tran,eigen_vectors_LS_tran=eigen(Ls_tran)\r\n eigen_values_L,eigen_vectors_L=eigen(L)\r\n eigen_values_L2,eigen_vectors_L2=eigen(L*0.36)\r\n\r\n ### could have eigs here as didn't end up using all eigenvalues ####\r\n #eigen values graph\r\n n0=len(eigen_values_L)\r\n eig_nums=np.arange(n0)\r\n plt.figure(figsize=(12, 6))\r\n plt.scatter(eig_nums[0:10],eigen_values_L2[0:10],s=50,marker=\"x\" ,label='L , D=0.36')\r\n plt.scatter(eig_nums[0:10],eigen_values_LS[0:10],s=50, marker=\"|\",label='LS , D=1')\r\n plt.scatter(eig_nums[0:10],eigen_values_LS_tran[0:10],s=50,marker='_',label='LS_tran , D=1')\r\n plt.scatter(eig_nums[0:10],eigen_values_L[0:10],s=50,marker=\"+\" ,label='L , D=1')\r\n plt.legend(loc=\"lower left\", fontsize=12,fancybox=True, framealpha=1, shadow=True, borderpad=1)\r\n plt.xlabel('eigen value number')\r\n plt.ylabel('eigenvalue')\r\n plt.title(\"Eigenvlaues of Laplacian Matrixs\")\r\n plt.show()\r\n\r\n print(\"4 biggest eigenvalues for each operater\")\r\n print('L=',eigen_values_L[0:4])\r\n print('Ls=',eigen_values_LS[0:4])\r\n print('Ls_tran=',eigen_values_LS_tran[0:4])\r\n #prints 4 biggest eigen values\r\n #counts node distrubtion by creating dictionary\r\n def result_count(sol,Nt,G):\r\n \"\"\" returns cumlative frequency/probailties for nodes of same degree and returns dictionary\"\"\"\r\n n = G.number_of_nodes()\r\n dict_freq={}\r\n for i in range(n):\r\n k=G.degree(i)\r\n if k not in dict_freq:\r\n dict_freq[k]=sol[Nt,i]\r\n else:\r\n dict_freq[k]+=sol[Nt,i]\r\n return dict_freq\r\n\r\n #frequency count of solutions\r\n dict_freq=result_count(solL,Nt,G)\r\n dict_freq2=result_count(solLs,Nt,G)\r\n dict_freq3=result_count(solLs_tran,Nt,G)\r\n\r\n #random walk data\r\n X=rwgraph(G,j,20000,100)\r\n Listnodes7=[]\r\n for i in range(20000):\r\n Listnodes7.append(G.degree(X[i,100]))\r\n X=rwgraph(G,j,200,100)\r\n Listnodes8=[]\r\n for i in range(200):\r\n Listnodes8.append(G.degree(X[i,100]))\r\n X=rwgraph(G,j,50000,5000)\r\n Listnodes9=[]\r\n for i in range(50000):\r\n Listnodes9.append(G.degree(X[i,5000]))\r\n listfreq7=CountFrequency(Listnodes7)\r\n listfreq8=CountFrequency(Listnodes8)\r\n listfreq9=CountFrequency(Listnodes9)\r\n listfreq_deg=CountFrequency(degree_dist)\r\n z2=[]\r\n z3=[]\r\n z1=[]\r\n z_deg2=[]\r\n z_deg3=[]\r\n z_deg1=[]\r\n for i in listfreq7:\r\n z2.append(listfreq7[i]/(listfreq_deg[i]*20000))\r\n z_deg2.append(i)\r\n for i in listfreq8:\r\n z3.append(listfreq8[i]/(listfreq_deg[i]*200))\r\n z_deg3.append(i)\r\n for i in listfreq8:\r\n z1.append(listfreq9[i]/(listfreq_deg[i]*50000))\r\n z_deg1.append(i)\r\n #operator solutions compared to node degree frequency\r\n z4,z5,z6=[],[],[]\r\n z_deg4,z_deg5,z_deg6=[],[],[]\r\n for i in dict_freq:\r\n z4.append(dict_freq[i]/(listfreq_deg[i]*200))\r\n z_deg4.append(i)\r\n for i in dict_freq2:\r\n z5.append(dict_freq2[i]/(listfreq_deg[i]*200))\r\n z_deg5.append(i)\r\n for i in dict_freq3:\r\n z6.append(dict_freq3[i]/(listfreq_deg[i]*200))\r\n z_deg6.append(i)\r\n\r\n plt.figure(figsize=(15, 10))\r\n plt.scatter(z_deg1, z1,label='Nt=5000, M=50000')\r\n plt.scatter(z_deg2, z2,label='Nt=100, M=20000')\r\n plt.scatter(z_deg3, z3,label='Nt=100, M=200')\r\n plt.scatter(z_deg4, z4,label='L, Nt=100')\r\n plt.scatter(z_deg5, z5,label='Ls, Nt=100')\r\n plt.scatter(z_deg6, z6,label='Ls_tran, Nt=100')\r\n plt.ylim((-0.005,0.020))\r\n plt.xlabel('degree of node')\r\n plt.ylabel('frequency of final position / M*frequency of 
degree')\r\n plt.legend(loc=\"upper left\", fontsize=12,fancybox=True, framealpha=1, shadow=True, borderpad=1)\r\n plt.title(\"Frequency of final positions relative to number of nodes of that degree, for changing times Nt and M.\")\r\n plt.show()\r\n\r\n #code to produce final graph\r\n iarray1=LinearModel(G,x=j,i0=1,L1='L',D=1,tf=20,Nt=Nt)\r\n iarray2=LinearModel(G,x=j,i0=1,L1='Ls',D=1,tf=20,Nt=Nt)\r\n iarray3=LinearModel(G,x=j,i0=1,L1='Lst',D=1,tf=20,Nt=Nt)\r\n tarray = np.linspace(0,5,Nt+1)\r\n plt.figure(figsize=(12, 6))\r\n plt.plot(tarray, iarray1[:,7] ,label='rand node L,deg=46',color='b',alpha=0.5)\r\n plt.plot(tarray, iarray2[:,7] ,label='rand node Ls,deg=46',marker='|',color='r')\r\n plt.scatter(tarray, iarray3[:,7] ,label='rand node LST,deg=46',marker='_',color='y')\r\n plt.scatter(tarray, iarray1[:,1801] ,label='rand node L, deg=5',color='m',alpha=0.5,marker='+')\r\n plt.plot(tarray, iarray2[:,1801] ,label='rand node Ls,deg=5',marker='|',color='c')\r\n plt.scatter(tarray, iarray3[:,1801] ,label='rand node LST,deg=5',marker='_',color='g')\r\n plt.xlabel('time')\r\n plt.ylabel('representive frequency')\r\n plt.legend()\r\n plt.title(\"Comparing repestive frequency of a random nodes, for the different linear models,time step=50,D=0.1\")\r\n plt.show()\r\n return None #modify as needed\r", "def experiment_linear_lp(adv_norm_type, dual_norm_type, baseline_norm_types,\n attack_step_dir):\n module_name = 'train'\n # log_dir = 'runs_linear_%s' % adv_norm_type\n # log_dir = 'runs_linear_postnorm_%s' % adv_norm_type\n log_dir = 'runs_linear_postnorm_randinit_%s' % adv_norm_type\n exclude = '*'\n\n shared_params = get_shared_params(adv_norm_type, dual_norm_type,\n attack_step_dir)\n\n # No 0 regularization coefficient\n reg_coeff = [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1]\n # reg_coeff = [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 10]\n # Between 1e-3 and 1e-1 for d/n=10 the adv robustness drops\n # reg_coeff += [3e-3, 5e-3, 3e-2, 5e-2, 3e-1, 5e-1]\n\n # Model hyper-parameters\n linear_noreg_model_params = nameit('model', [\n ('arch', 'linear'),\n ('regularizer', 'none'),\n ])\n linear_reg_model_params = nameit('model', [\n ('arch', 'linear'),\n ('regularizer', ['w_%s' % b for b in baseline_norm_types] +\n ['w_%s' % dual_norm_type]),\n ('reg_coeff', reg_coeff),\n ])\n\n params = []\n\n # cvxpy solution\n # njobs=3*6*1=18\n cvxpy_params = nameit('optim', [\n ('name', 'cvxpy'),\n ('norm', dual_norm_type),\n ('niters', 10000),\n ('lr', 0), # keep cvxpy sol fixed\n ])\n params += [OrderedDict(shared_params+linear_noreg_model_params+cvxpy_params)]\n\n # njobs=3*6*2=36\n # CD with line search doesn't work right, so not including it\n gd_ls = nameit('optim', [\n ('name', 'gd_ls'), # ['gd_ls', 'cd_ls']),\n ('niters', 10000),\n ('bound_step', True),\n ])\n params += [OrderedDict(shared_params+linear_noreg_model_params+gd_ls)]\n\n # Implicit bias with fixed lr\n # GD with fixed lr performs similar to line search, so we don't include them\n # njobs=3*6*11=198\n # gd_fixed_lr = nameit('optim', [\n # ('name', 'gd'),\n # ('niters', 10000),\n # ('lr', [\n # 1e-5, 3e-5, 1e-4, 3e-4, 1e-3, 3e-3, 1e-2, 3e-2, 1e-1,\n # 3e-1, 1\n # ]),\n # ])\n # params += [OrderedDict(shared_params+linear_noreg_model_params+gd_fixed_lr)]\n\n # njobs=3*6*19=342\n cd_fixed_lr = nameit('optim', [\n ('name', ['cd', 'signgd']),\n ('niters', 10000),\n ('lr', [\n 1e-5, 3e-5, 1e-4, 3e-4, 1e-3, 3e-3, 1e-2, 3e-2, 1e-1,\n 3e-1, 1, 2, 3, 6, 9, 10, 20, 30, 50\n ]),\n ])\n params += 
[OrderedDict(shared_params+linear_noreg_model_params+cd_fixed_lr)]\n\n # Explicit regularization with line search\n # njobs=3*6*20*4*2=2880\n explicit_reg = nameit('optim', [\n ('name', 'fista'),\n ('niters', 10000),\n ('bound_step', True),\n ('step_size', [1, 10, 100, 1000]),\n ])\n params += [OrderedDict(shared_params+linear_reg_model_params+explicit_reg)]\n\n # Adversarial training with line search\n # njobs=3*6*5=90\n adv_train_params = nameit('optim', [\n ('name', 'gd_ls'),\n ('niters', 10000),\n ('bound_step', True),\n ])\n adv_train_params += nameit('optim', nameit('adv_train', [\n ('enable', True),\n ('norm_type', adv_norm_type),\n ('lr', 0.1),\n ('niters', 10), # niters, 1000\n ('pre_normalize', True),\n ('post_normalize', True),\n ('step_dir', attack_step_dir),\n ('eps_from_cvxpy', True),\n ]))\n adv_train_params = OrderedDict(\n shared_params+linear_noreg_model_params+adv_train_params)\n\n params += [adv_train_params]\n\n return params, log_dir, module_name, exclude", "def lltnum(self,):\n m = self.m\n n = self.n\n diag = self.diag\n perm = self.perm\n AAt = self.AAt\n kAAt = self.kAAt\n iAAt = self.iAAt\n mark = self.mark\n self.denwin\n\n m2 = m+n\n #/*------------------------------------------------------+\n #| initialize constants */\n\n temp = np.zeros(m2)\n first = np.zeros(m2, dtype=np.int)\n link = np.empty(m2, dtype=np.int)\n for i in range(m2):\n link[i] = -1\n\n maxdiag=0.0\n for i in range(m2):\n if abs(diag[i]) > maxdiag:\n maxdiag = abs(diag[i])\n\n self.ndep=0\n\n #/*------------------------------------------------------+\n #| begin main loop - this code is taken from George and |\n #| Liu's book, pg. 155, modified to do LDLt instead |\n #| of LLt factorization. */\n\n for i in range(m2):\n diagi = diag[i]\n sgn_diagi = -1 if perm[i] < n else 1\n j = link[i]\n while j != -1:\n newj = link[j]\n k = first[j]\n lij = AAt[k]\n lij_dj = lij*diag[j]\n diagi -= lij*lij_dj\n k_bgn = k+1\n k_end = kAAt[j+1]\n if k_bgn < k_end:\n first[j] = k_bgn\n row = iAAt[k_bgn]\n link[j] = link[row]\n link[row] = j\n if j < self.denwin:\n for kk in range(k_bgn, k_end):\n temp[iAAt[kk]] += lij_dj*AAt[kk]\n else:\n ptr = row\n for kk in range(k_bgn, k_end):\n temp[ptr] += lij_dj*AAt[kk]\n ptr+=1\n\n j=newj\n\n k_bgn = kAAt[i]\n k_end = kAAt[i+1]\n for kk in range(k_bgn, k_end):\n row = iAAt[kk]\n AAt[kk] -= temp[row]\n\n if abs(diagi) <= self.epsnum*maxdiag or mark[i] == False:\n\n #if (sgn_diagi*diagi <= epsnum*maxdiag || mark[i] == FALSE)\n\n self.ndep+=1\n maxoffdiag = 0.0\n for kk in range(k_bgn, k_end):\n maxoffdiag = max( maxoffdiag, abs( AAt[kk] ) )\n\n if maxoffdiag < 1.0e+6*self._EPS:\n mark[i] = False\n else:\n diagi = sgn_diagi * self._EPS\n\n diag[i] = diagi\n if k_bgn < k_end:\n first[i] = k_bgn\n row = iAAt[k_bgn]\n link[i] = link[row]\n link[row] = i\n for kk in range(k_bgn, k_end):\n row = iAAt[kk]\n if mark[i]:\n AAt[kk] /= diagi\n else:\n AAt[kk] = 0.0\n\n temp[row] = 0.0\n\n del(link)\n del(first)\n del(temp)", "def image_linear_solve(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None,\n kwargs_extinction=None, kwargs_special=None, inv_bool=False):\n kwargs_lens_i, kwargs_source_i, kwargs_lens_light_i, kwargs_ps_i, kwargs_extinction_i = self.select_kwargs(\n kwargs_lens,\n kwargs_source,\n kwargs_lens_light,\n kwargs_ps,\n kwargs_extinction)\n wls_model, error_map, cov_param, param = self._image_linear_solve(kwargs_lens_i, kwargs_source_i,\n kwargs_lens_light_i, kwargs_ps_i,\n kwargs_extinction_i, kwargs_special, inv_bool=inv_bool)\n # 
For the interfometric likelihood method, \n # return the array2 of [array1, array2] of the model output of _image_linear_solver.\n if self.Data.likelihood_method() == \"interferometry_natwt\":\n wls_model = wls_model[1]\n return wls_model, error_map, cov_param, param", "def matrix_regression(Y, X, lmbd=-1, L=-1, symmetric=True, iterations=5000, etol=10e-5, verbose=False):\n # check the dimensions of Y and X\n if Y.shape[1] > X.shape[1]:\n raise ValueError('X must have at least as many columns as Y.')\n if X.shape[0] != X.shape[0]:\n raise ValueError('X and Y must have the same row dimension.')\n if Y.ndim != 2 or X.ndim != 2:\n raise ValueError('X and Y must be matrices.')\n\n # default penalty parameter\n if lmbd <= 0:\n lmbd = 2 * (np.sqrt(Y.shape[1]) + np.sqrt(X.shape[1]) + 1) * (np.sqrt(X.shape[1]) + np.sqrt(X.shape[0]))\n\n # initial guess for solution\n prev_W = symmetrize(np.random.rand(X.shape[1],Y.shape[1]))\n Z = prev_W\n\n # compute Lipschitz constant for optimizer\n if L == -1:\n U, s, V = np.linalg.svd(X.T.dot(X))\n L = s[0]\n\n iters = 0\n err = 1\n alpha = 1\n \n # Implements step 3 of Algorithm 2 of Ji and Ye (2009). Other steps are avoided because we already computed the Lipschitz constant.\n while iters < iterations and err > etol:\n W = gradient_step(Y, X, lmbd, L, Z) # first part of step 3\n prev_alpha = alpha\n alpha = (1 + np.sqrt(1 + 4*(prev_alpha**2)))/2 # second part of step 3, equation (18)\n Z = W + ((prev_alpha - 1)/alpha) * (W - prev_W) # third part of step 3, equation (19)\n \n err = np.abs(prev_W - W).mean() # measure error relative to previous step\n iters += 1\n prev_W = W # update\n\n if iters%100==0 and verbose: print('Iteration {}. Error {}'.format(iters,err))\n \n if verbose: print('Iteration {}. Error {}'.format(iters,err))\n if iters == iterations: print('Warning: max iterations hit.')\n \n if symmetric: W = symmetrize(W) # optionally impose constraints on graph\n return W" ]
[ "0.5904229", "0.56168336", "0.5549464", "0.5510236", "0.54761505", "0.54374045", "0.5416033", "0.53865016", "0.5377905", "0.5372089", "0.53265786", "0.5315447", "0.52730715", "0.5262824", "0.52390164", "0.52042484", "0.5179361", "0.5165328", "0.51650435", "0.5147611", "0.5143179", "0.5130516", "0.51260084", "0.51134115", "0.51095116", "0.51057214", "0.5093891", "0.5088568", "0.50818145", "0.5080147", "0.5059045", "0.504144", "0.50408024", "0.5035424", "0.50146", "0.5008114", "0.5005498", "0.49972576", "0.4997054", "0.49950355", "0.49942005", "0.49934274", "0.49839982", "0.49582404", "0.4932667", "0.49311274", "0.49288255", "0.4923569", "0.49220216", "0.49193317", "0.48987335", "0.4885691", "0.48846984", "0.48781934", "0.48745027", "0.4874176", "0.48643053", "0.4857626", "0.4845253", "0.48425338", "0.48307246", "0.48286676", "0.4818483", "0.48157454", "0.48150378", "0.48135662", "0.4813155", "0.48055068", "0.48033926", "0.479228", "0.4790952", "0.47906327", "0.47866958", "0.4786536", "0.47855622", "0.47836527", "0.47813073", "0.4776494", "0.47714567", "0.4770208", "0.4765296", "0.47567475", "0.47459257", "0.47400463", "0.47396716", "0.47388643", "0.4733425", "0.47310382", "0.47307205", "0.47306204", "0.47289622", "0.47268575", "0.47265255", "0.4725572", "0.47255313", "0.47246504", "0.47111517", "0.47106493", "0.47072083", "0.46999165" ]
0.65866923
0
This is a wrapper for solving SGL problems on the connected components of the solution, solving each block separately. See Witten, Friedman, Simon "New Insights for the Graphical Lasso" for details. It solves a separate Single Graphical Lasso problem on each connected component of the covariance matrix S thresholded at lambda1.
def block_SGL(S, lambda1, Omega_0, Theta_0=None, X_0=None, rho=1., max_iter=1000, tol=1e-7, rtol=1e-3, stopping_criterion="boyd", update_rho=True, verbose=False, measure=False): assert Omega_0.shape == S.shape assert S.shape[0] == S.shape[1] assert lambda1 > 0 (p, p) = S.shape if Theta_0 is None: Theta_0 = Omega_0.copy() if X_0 is None: X_0 = np.zeros((p, p)) # compute connected components of S with lambda_1 threshold numC, allC = get_connected_components(S, lambda1) allOmega = list() allTheta = list() allX = list() for i in range(numC): C = allC[i] # single node connected components have a closed form solution, see Witten, Friedman, Simon "NEW INSIGHTS FOR THE GRAPHICAL LASSO " if len(C) == 1: # we use the OFF-DIAGONAL l1-penalty, otherwise it would be 1/(S[C,C]+lambda1) closed_sol = 1 / (S[C, C]) allOmega.append(closed_sol) allTheta.append(closed_sol) allX.append(np.array([0])) # else solve Graphical Lasso for the corresponding block else: block_S = S[np.ix_(C, C)] block_sol, block_info = ADMM_SGL(S=block_S, lambda1=lambda1, Omega_0=Omega_0[np.ix_(C, C)], Theta_0=Theta_0[np.ix_(C, C)], X_0=X_0[np.ix_(C, C)], tol=tol, rtol=rtol, stopping_criterion=stopping_criterion, update_rho=update_rho, rho=rho, max_iter=max_iter, verbose=verbose, measure=measure) allOmega.append(block_sol['Omega']) allTheta.append(block_sol['Theta']) allX.append(block_sol['X']) # compute inverse permutation per = np.hstack(allC) per1 = invert_permutation(per) # construct solution by applying inverse permutation indexing sol = dict() sol['Omega'] = block_diag(*allOmega)[np.ix_(per1, per1)] sol['Theta'] = block_diag(*allTheta)[np.ix_(per1, per1)] sol['X'] = block_diag(*allX)[np.ix_(per1, per1)] return sol
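A minimal usage sketch of the wrapper above, assuming block_SGL and the helpers it calls (get_connected_components, ADMM_SGL, invert_permutation, block_diag) are importable from the same module; the synthetic data, the lambda1 value and the variable names below are illustrative assumptions, not part of the original source.

import numpy as np

# Illustrative data: empirical covariance of a small synthetic sample.
rng = np.random.default_rng(0)
X = rng.standard_normal((200, 20))
S = np.cov(X, rowvar=False)

# The identity matrix is a common starting point for the precision matrix.
Omega_0 = np.eye(S.shape[0])

# Solve the l1-penalized problem block-wise over the connected components of S.
sol = block_SGL(S, lambda1=0.1, Omega_0=Omega_0)

Theta = sol['Theta']   # estimated sparse precision matrix
Omega = sol['Omega']   # ADMM primal variable (approximately equal to Theta at convergence)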
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gel_solve(\n A,\n y,\n l_1,\n l_2,\n ns,\n b_init=None,\n block_solve_fun=block_solve_agd,\n block_solve_kwargs=None,\n max_cd_iters=None,\n rel_tol=1e-6,\n Cs=None,\n Is=None,\n verbose=False,\n):\n p = len(A)\n m = len(y)\n device = A[0].device\n dtype = A[0].dtype\n y = y.to(device, dtype)\n if block_solve_kwargs is None:\n block_solve_kwargs = dict()\n\n # Create initial values if not specified.\n if b_init is None:\n b_init = 0.0, torch.zeros(p, max(ns), device=device, dtype=dtype)\n\n if not isinstance(ns, torch.Tensor):\n ns = torch.tensor(ns)\n sns = ns.to(device, dtype).sqrt()\n a_1 = l_1 * sns\n ma_1 = m * a_1\n a_2 = 2 * l_2 * sns\n b_0, B = b_init\n b_0_prev, B_prev = b_0, B\n k = 1 # iteration number\n pbar_stats = {} # stats for the outer progress bar\n pbar = tqdm.tqdm(\n desc=\"Solving gel with CD (l_1 {:.2g}, l_2 {:.2g})\".format(l_1, l_2),\n disable=not verbose,\n )\n\n while True:\n # First minimize with respect to b_0. This has a closed form solution\n # given by b_0 = 1'@(y - sum_j A_j@b_j) / m.\n b_0 = (y - sum(A[j] @ B[j, : ns[j]] for j in range(p))).sum() / m\n\n # Now, minimize with respect to each b_j.\n for j in tqdm.trange(\n p, desc=\"Solving individual blocks\", disable=not verbose, leave=False\n ):\n r_j = y - b_0 - sum(A[k] @ B[k, : ns[k]] for k in range(p) if k != j)\n\n # Check if b_j must be set to 0. The condition is ||A_j'@r_j|| <=\n # m*a_1.\n if (A[j].t() @ r_j).norm(p=2) <= ma_1[j]:\n B[j] = 0\n else:\n # Otherwise, minimize. First make sure initial value is not 0.\n if len((B[j, : ns[j]].abs() < 1e-6).nonzero()) == ns[j]:\n B[j, : ns[j]] = 1e-3\n\n # Add C_j and I_j to the arguments if using Newton's method.\n if block_solve_fun is block_solve_newton:\n block_solve_kwargs[\"C_j\"] = Cs[j]\n block_solve_kwargs[\"I_j\"] = Is[j]\n\n B[j, : ns[j]] = block_solve_fun(\n r_j,\n A[j],\n a_1[j].item(),\n a_2[j].item(),\n m,\n B[j, : ns[j]],\n verbose=verbose,\n **block_solve_kwargs,\n )\n\n # Compute relative change in b.\n b_0_diff = b_0 - b_0_prev\n B_diff = B - B_prev\n delta_norm = (b_0_diff ** 2 + (B_diff ** 2).sum()).sqrt()\n b_norm = (b_0 ** 2 + (B ** 2).sum()).sqrt()\n\n pbar_stats[\"rel change\"] = \"{:.2g}\".format(delta_norm.item() / b_norm.item())\n pbar.set_postfix(pbar_stats)\n pbar.update()\n\n # Check max iterations exit criterion.\n if max_cd_iters is not None and k == max_cd_iters:\n break\n k += 1\n\n # Check tolerance exit criterion.\n if delta_norm.item() <= rel_tol * b_norm.item() and k > 2:\n break\n b_0_prev, B_prev = b_0, B\n\n pbar.close()\n return b_0.item(), B", "def solve_l1(y, A_fun, AT_fun, lambda_l1, reshape_img_fun, show_img_progress=False, alpha=0.2, max_iter=100, solver_tol=1e-6):\n\n\n obj_lss = np.zeros(max_iter)\n x_zs = np.zeros(max_iter)\n u_norms = np.zeros(max_iter)\n times = np.zeros(max_iter)\n\n ATy = AT_fun(y)\n x_shape = ATy.shape\n d = np.prod(x_shape)\n\n def A_cgs_fun(x):\n x = np.reshape(x, x_shape, order='F')\n y = AT_fun(A_fun(x)) + alpha * x\n return vec(y)\n A_cgs = LinearOperator((d,d), matvec=A_cgs_fun, dtype='float')\n\n def compute_p_inv_A(b, z0):\n (z,info) = sp.sparse.linalg.cgs(A_cgs, vec(b), x0=vec(z0), tol=1e-3, maxiter=100)\n if info > 0:\n print('cgs convergence to tolerance not achieved')\n elif info <0:\n print('cgs gets illegal input or breakdown')\n z = np.reshape(z, x_shape, order='F')\n return z\n\n\n def A_cgs_fun_init(x):\n x = np.reshape(x, x_shape, order='F')\n y = AT_fun(A_fun(x))\n return vec(y)\n A_cgs_init = LinearOperator((d,d), matvec=A_cgs_fun_init, dtype='float')\n\n 
def compute_init(b, z0):\n (z,info) = sp.sparse.linalg.cgs(A_cgs_init, vec(b), x0=vec(z0), tol=1e-2)\n if info > 0:\n print('cgs convergence to tolerance not achieved')\n elif info <0:\n print('cgs gets illegal input or breakdown')\n z = np.reshape(z, x_shape, order='F')\n return z\n\n # initialize z and u\n z = compute_init(ATy, ATy)\n u = np.zeros(x_shape)\n\n\n plot_normalozer = matplotlib.colors.Normalize(vmin=0.0, vmax=1.0, clip=True)\n\n\n start_time = timeit.default_timer()\n\n for iter in range(max_iter):\n\n # x-update\n net_input = z+u\n Wzu, wbook = wavelet_transform(net_input)\n q = soft_threshold(Wzu, lambda_l1/alpha)\n x = inverse_wavelet_transform(q, wbook, x_shape)\n x = np.reshape(x, x_shape)\n\n # z-update\n b = ATy + alpha * (x - u)\n z = compute_p_inv_A(b, z)\n\n # u-update\n u += z - x;\n\n if show_img_progress == True:\n\n fig = plt.figure('current_sol')\n plt.gcf().clear()\n fig.canvas.set_window_title('iter %d' % iter)\n plt.subplot(1,3,1)\n plt.imshow(reshape_img_fun(np.clip(x, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('x')\n plt.subplot(1,3,2)\n plt.imshow(reshape_img_fun(np.clip(z, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('z')\n plt.subplot(1,3,3)\n plt.imshow(reshape_img_fun(np.clip(net_input, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('netin')\n plt.pause(0.00001)\n\n\n obj_ls = 0.5 * np.sum(np.square(y - A_fun(x)))\n x_z = np.sqrt(np.mean(np.square(x-z)))\n u_norm = np.sqrt(np.mean(np.square(u)))\n\n print('iter = %d: obj_ls = %.3e |x-z| = %.3e u_norm = %.3e' % (iter, obj_ls, x_z, u_norm))\n\n\n obj_lss[iter] = obj_ls\n x_zs[iter] = x_z\n u_norms[iter] = u_norm\n times[iter] = timeit.default_timer() - start_time\n\n if x_z < solver_tol:\n break\n\n infos = {'obj_lss': obj_lss, 'x_zs': x_zs, 'u_norms': u_norms,\n 'times': times, 'alpha':alpha, 'lambda_l1':lambda_l1,\n 'max_iter':max_iter, 'solver_tol':solver_tol}\n\n\n return (x, z, u, infos)", "def solve(self):\n\n # Assign variables to each quantity being solved.\n r_lookup, lookup, num = {}, {}, 0\n for element in self.elements:\n if is_wire(element) and element is not self.ground:\n lookup[num] = element\n r_lookup[element] = num\n num += 1\n elif not is_cs(element) and element is not self.ground:\n lookup[num] = element\n r_lookup[element] = num\n num += 1\n\n # Set up the linear algebraic equation Ax=b\n A = np.zeros((num, num))\n b = np.zeros(num)\n for row, element in lookup.items():\n if is_wire(element) and element is not self.ground:\n for two_sided in element.attached:\n if is_cs(two_sided):\n if two_sided.pos is element:\n b[row] += -1 * two_sided.current\n else:\n b[row] += two_sided.current\n else:\n if two_sided.pos is element:\n flow = 1\n else:\n flow = -1\n A[row, r_lookup[two_sided]] = flow\n elif is_vs(element):\n check_connected(element)\n if element.pos is not self.ground:\n A[row, r_lookup[element.pos]] = 1\n if element.neg is not self.ground:\n A[row, r_lookup[element.neg]] = -1\n b[row] = element.voltage\n elif is_resistor(element):\n check_connected(element)\n if element.pos is not self.ground:\n A[row, r_lookup[element.pos]] = 1\n if element.neg is not self.ground:\n A[row, r_lookup[element.neg]] = -1\n A[row, r_lookup[element]] = -1 * element.resistance\n\n b = b.reshape((num, 1))\n try:\n x = np.linalg.solve(A, b)\n except np.linalg.LinAlgError:\n raise CircuitError('Insufficient information to solve circuit')\n\n # Assign values to all circuit components\n for i in range(num):\n item = 
lookup[i]\n if is_wire(item):\n item.potential = x[i, 0]\n elif isinstance(item, DualSided):\n item.current = x[i, 0]\n\n # Mark circuit as solved\n self.been_solved = True", "def solver(u_init, eta_0, eta, eta_lin, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, ftol = 1e-3, max_iter = 5000, verbose = 0, nnls_max_iter=30):\n\n # Raise('NotImplementedError: only adjusted the arguments.')\n #Need to incorporate L_lhs into stacked and appropriate w_lin updates, u_update and eta_lin increments\n #precompute the expensive operation:\n lin_penalties = 1/np.sqrt(2*eta_lin)\n eta_T_H_L_stacked = scipy.sparse.vstack([T.multiply(1/np.sqrt(2*eta_0))] + [H[i].multiply(1/np.sqrt(2*eta[i])) for i in range(len(H))] + [L_lhs.multiply(lin_penalties[:,None])])\n #!!!!\n# premultiplied_lhs = eta_T_H_stacked.T.dot(eta_T_H_stacked).toarray()\n #!!!!\n u_prev = u_init + 1\n u = u_init\n count = 0\n obj_history = []\n relaxed_obj_history = [-1, 0.1] #just two initial values to enter the loop\n while np.abs((relaxed_obj_history[-2] - relaxed_obj_history[-1])/relaxed_obj_history[-2]) > ftol and count < max_iter:#np.linalg.norm(u - u_prev, np.inf) > 1e-3 and count < max_iter: #Maybe all of them stop changing\n start = time.time()\n \n u_prev = np.copy(u)\n w_0 = w_0_update(eta_0, u, T, alpha, B) \n w = w_update(u, H, gamma, D, C) \n w_lin = w_lin_update(u, L_lhs, L_rhs)\n# u = u_update(eta_0, eta, w_0, w, eta_T_H_stacked, nnls_max_iter=50)\n #!!!!\n # u = u_update(eta_0, eta, w_0, w, eta_T_H_L_stacked, nnls_max_iter=30)\n u = u_update(eta_0, eta, eta_lin, w_0, w, w_lin, eta_T_H_L_stacked, premultiplied_lhs = None, nnls_max_iter=nnls_max_iter)\n #!!!!\n count += 1 \n if count == 10:\n u_inf = np.copy(u)\n w_0_inf = w_0[:]\n w_inf = w[:]\n w_lin_inf = w_lin[:]\n if count > 10 and np.abs(cur_obj) > 1e+15: #HANDLE THIS BETTER!!!\n print('INFINITY! 
RETURNING u at the 10-th iteration to enter the feasibility loop')\n return u_inf, w_0_inf, w_inf, w_lin_inf, obj_history, relaxed_obj_history\n \n cur_obj = obj_u_opt_N_fixed(u, T, alpha, B)\n obj_history.append(cur_obj)\n cur_relaxed_obj = relaxed_obj_u_opt_N_fixed(u, w_0, w, w_lin, eta_0, eta, eta_lin, T, H, L_lhs, alpha, B)\n # relaxed_obj_u_opt_N_fixed(u, w_0, w, eta_0, eta, T, H, alpha, B)\n relaxed_obj_history.append(cur_relaxed_obj) \n \n stop = time.time()\n duration = stop-start\n \n if count%1 == 0 and verbose: \n stopping_criterion = np.abs((relaxed_obj_history[-2] - relaxed_obj_history[-1])/relaxed_obj_history[-2])\n print(' iter = {}, stopping criterion:{}, OBJ {}'.format(count, stopping_criterion, cur_obj))\n print(' This iteration took: {}'.format(duration))\n return u, w_0, w, w_lin, obj_history, relaxed_obj_history", "def __solve_full_linear_problem(self):\n samples = []\n\n for news in self.news_pool:\n samples += [news.sampled_quality] * self.layout_slots\n\n self.full_C = np.array(samples) * self.full_lambdas\n\n linear_problem = opt.linprog(A_ub=self.full_A, b_ub=self.full_B, c=self.full_C)\n slots_assegnation_probabilities = []\n slot_counter = 0\n tmp_slot_probabilities = []\n while slot_counter < self.layout_slots:\n i = slot_counter\n while i < len(linear_problem.x):\n tmp_slot_probabilities.append(np.abs(linear_problem.x[i]))\n i += self.layout_slots\n slots_assegnation_probabilities.append(tmp_slot_probabilities.copy())\n tmp_slot_probabilities.clear()\n slot_counter += 1\n\n result = self.__de_randomize_LP(self.news_pool, slots_assegnation_probabilities, self.lp_rand_tech)\n\n return result", "def solveLSM(self):\n ierr = c_int(1)\n self.fteik2d.fteik_solver2d_solveLSM(ierr)\n if (ierr.value != 0):\n print(\"Error solving eikonal equation\")\n return -1\n return 0\n #errorAll = 0\n #for i in range(self.nsrc):\n # isrc = i + 1\n # self.fteik2d.fteik_solver2d_solveSourceLSM(isrc, ierr)\n # if (ierr.value != 0):\n # print(\"Failed to solve for source %d\"%i+1)\n # errorAll = errorAll + 1\n #return errorAll", "def solve(self):\n\n # Set up display header if verbose operation enabled\n if self.opt['Verbose']:\n hdr = 'Itn DFidX PriResX DuaResX DFidG' + \\\n ' ResG '\n print(hdr)\n print('-' * len(hdr))\n\n # Main iteration loop\n for n in range(self.opt['MaxMainIter']):\n\n # At start of 2nd iteration, set the numbers of inner\n # iterations for the X and G solvers from the options\n # object for the outer solver\n if n == 1:\n self.slvX.opt['MaxMainIter'] = self.opt['XslvIter']\n self.slvG.opt['MaxMainIter'] = self.opt['GslvIter']\n\n # Run the configured number of iterations of the X (CSC)\n # solver and assign the result to X\n self.X = self.slvX.solve()\n\n # Compute the sum of the subpixel shifts of X\n Xhs = np.sum(fftconv(self.H, self.X.squeeze(), axes=(0, 1)),\n axis=-1)\n\n # Set the convolution kernel in the deconvolution solver\n # to the sum of the subpixel shifts of X\n self.slvG.setG(Xhs)\n # Run the configured number of iterations of the G\n # (deconvolution) solver and crop the result to obtain the\n # updated g\n self.g = self.slvG.solve()[0:self.gshp[0], 0:self.gshp[1]]\n\n # Construct a new dictionary for the X (CSC) solver from\n # the updated psf g\n self.D, self.dn = self.getD(self.g)\n self.slvX.setdict(self.D[..., np.newaxis, np.newaxis, :])\n\n # Display iteration statistics if verbose operation enabled\n if self.opt['Verbose']:\n itsX = self.slvX.getitstat()\n itsG = self.slvG.getitstat()\n fmt = '%3d %.3e %.3e %.3e %.3e %.3e'\n tpl = (n, 
itsX.DFid[-1], itsX.PrimalRsdl[-1],\n itsX.DualRsdl[-1], itsG.DFid[-1], itsG.Rsdl[-1])\n print(fmt % tpl)\n\n # Return the (normalised) psf estimate g\n return self.g / np.linalg.norm(self.g)", "def get_sol(self):", "def test_linear_buckling_iso_CCSS(plot_static=False, plot_lb=False):\n # number of nodes\n nx = 5 # along x\n ny = 5 # along y\n\n # getting integration points\n nint = 4\n points, weights = get_points_weights(nint=nint)\n\n # geometry\n a = 3 # along x\n b = 3 # along y\n\n # material properties\n E = 200e9\n nu = 0.3\n laminaprop = (E, E, nu)\n stack = [0]\n h = 0.001\n lam = read_stack(stack=stack, plyt=h, laminaprop=laminaprop)\n\n # creating mesh\n x = np.linspace(0, a, nx)\n y = np.linspace(0, b, ny)\n xmesh, ymesh = np.meshgrid(x, y)\n\n # node coordinates and position in the global matrix\n ncoords = np.vstack((xmesh.T.flatten(), ymesh.T.flatten())).T\n nids = 1 + np.arange(ncoords.shape[0])\n nid_pos = dict(zip(nids, np.arange(len(nids))))\n\n # identifying nodal connectivity for plate elements\n # similar than Nastran's CQUAD4\n #\n # ^ y\n # |\n #\n # 4 ________ 3\n # | |\n # | | --> x\n # | |\n # |_______|\n # 1 2\n\n\n nids_mesh = nids.reshape(nx, ny)\n n1s = nids_mesh[:-1, :-1].flatten()\n n2s = nids_mesh[1:, :-1].flatten()\n n3s = nids_mesh[1:, 1:].flatten()\n n4s = nids_mesh[:-1, 1:].flatten()\n\n num_elements = len(n1s)\n print('num_elements', num_elements)\n\n N = DOF*nx*ny\n Kr = np.zeros(KC0_SPARSE_SIZE*num_elements, dtype=INT)\n Kc = np.zeros(KC0_SPARSE_SIZE*num_elements, dtype=INT)\n Kv = np.zeros(KC0_SPARSE_SIZE*num_elements, dtype=DOUBLE)\n KGr = np.zeros(KG_SPARSE_SIZE*num_elements, dtype=INT)\n KGc = np.zeros(KG_SPARSE_SIZE*num_elements, dtype=INT)\n KGv = np.zeros(KG_SPARSE_SIZE*num_elements, dtype=DOUBLE)\n init_k_KC0 = 0\n init_k_KG = 0\n\n plates = []\n for n1, n2, n3, n4 in zip(n1s, n2s, n3s, n4s):\n plate = BFSPlate2D()\n plate.n1 = n1\n plate.n2 = n2\n plate.n3 = n3\n plate.n4 = n4\n plate.c1 = DOF*nid_pos[n1]\n plate.c2 = DOF*nid_pos[n2]\n plate.c3 = DOF*nid_pos[n3]\n plate.c4 = DOF*nid_pos[n4]\n plate.ABD = lam.ABD\n plate.lex = a/(nx - 1)\n plate.ley = b/(ny - 1)\n plate.init_k_KC0 = init_k_KC0\n plate.init_k_KG = init_k_KG\n update_KC0(plate, points, weights, Kr, Kc, Kv)\n init_k_KC0 += KC0_SPARSE_SIZE\n init_k_KG += KG_SPARSE_SIZE\n plates.append(plate)\n\n KC0 = coo_matrix((Kv, (Kr, Kc)), shape=(N, N)).tocsc()\n\n # applying boundary conditions\n\n # locating nodes\n bk = np.zeros(KC0.shape[0], dtype=bool) # constrained DOFs, can be used to prescribe displacements\n\n x = ncoords[:, 0]\n y = ncoords[:, 1]\n\n # applying boundary conditions\n # simply supported\n check = isclose(x, 0) | isclose(x, a) | isclose(y, 0) | isclose(y, b)\n bk[2::DOF] = check\n check = isclose(x, 0) | isclose(x, a)\n bk[3::DOF] = check\n # point supports\n check = isclose(x, a/2) & (isclose(y, 0) | isclose(y, b))\n bk[0::DOF] = check\n check = isclose(y, b/2) & (isclose(x, 0) | isclose(x, a))\n bk[1::DOF] = check\n\n # unconstrained nodes\n bu = ~bk # logical_not\n\n # defining external force vector\n fext = np.zeros(KC0.shape[0], dtype=float)\n\n # applying unitary load along u at x=a\n # nodes at vertices get 1/2 the force\n for plate in plates:\n pos1 = nid_pos[plate.n1]\n pos2 = nid_pos[plate.n2]\n pos3 = nid_pos[plate.n3]\n pos4 = nid_pos[plate.n4]\n if isclose(x[pos3], a):\n Nxx = -1\n xi = +1\n elif isclose(x[pos1], 0):\n Nxx = +1\n xi = -1\n else:\n continue\n lex = plate.lex\n ley = plate.ley\n indices = []\n c1 = DOF*pos1\n c2 = DOF*pos2\n c3 = 
DOF*pos3\n c4 = DOF*pos4\n cs = [c1, c2, c3, c4]\n for ci in cs:\n for i in range(DOF):\n indices.append(ci + i)\n fe = np.zeros(4*DOF, dtype=float)\n for j in range(nint):\n eta = points[j]\n plate.update_Nu(xi, eta)\n Nu = np.asarray(plate.Nu)\n fe += ley/2*weights[j]*Nu*Nxx\n fext[indices] += fe\n\n Kuu = KC0[bu, :][:, bu]\n fextu = fext[bu]\n\n # static solver\n uu = spsolve(Kuu, fextu)\n u = np.zeros(KC0.shape[0], dtype=float)\n u[bu] = uu\n\n if plot_static:\n import matplotlib\n matplotlib.use('TkAgg')\n import matplotlib.pyplot as plt\n plt.gca().set_aspect('equal')\n uplot = u[0::DOF].reshape(nx, ny).T\n vplot = u[1::DOF].reshape(nx, ny).T\n print('u extremes', uplot.min(), uplot.max())\n print('v extremes', vplot.min(), vplot.max())\n levels = np.linspace(uplot.min(), uplot.max(), 300)\n plt.contourf(xmesh, ymesh, uplot, levels=levels)\n plt.colorbar()\n plt.show()\n\n # eigenvalue solver\n\n # getting integration points\n for plate in plates:\n update_KG(u, plate, points, weights, KGr, KGc, KGv)\n KG = coo_matrix((KGv, (KGr, KGc)), shape=(N, N)).tocsc()\n KGuu = KG[bu, :][:, bu]\n\n # solving modified generalized eigenvalue problem\n # Original: (KC0 + lambda*KG)*v = 0\n # Modified: (-1/lambda)*KC0*v = KG*v #NOTE here we find (-1/lambda)\n num_eigenvalues = 5\n eigvals, eigvecsu = eigsh(A=KGuu, k=num_eigenvalues, which='SM', M=Kuu,\n tol=1e-6, sigma=1., mode='cayley')\n eigvals = -1./eigvals\n eigvecs = np.zeros((KC0.shape[0], num_eigenvalues), dtype=float)\n eigvecs[bu, :] = eigvecsu\n\n if plot_lb:\n import matplotlib\n matplotlib.use('TkAgg')\n import matplotlib.pyplot as plt\n plt.gca().set_aspect('equal')\n mode = 0\n wplot = eigvecs[2::DOF, mode].reshape(nx, ny).T\n levels = np.linspace(wplot.min(), wplot.max(), 300)\n plt.contourf(xmesh, ymesh, wplot, levels=levels)\n plt.colorbar()\n plt.show()\n\n kc = eigvals[0]/(E*np.pi**2*(h/b)**2/(12*(1 - nu**2))*h)\n assert isclose(kc, 6.6, rtol=0.05)", "def optimize(self):\n\n self.logger.info(\"Solving with Dynamic Slope Scaling Procedure in Julia :\")\n optimization_start = time.time()\n\n # 1. 
Preprocess for old network graph\n if self.old_network_graph is not None:\n\n # DSSP on old network\n old_network_obj = sum(list(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).values()))-1e-5\n try:\n self.check_infeasibility(self.old_network_graph, old_network_obj)\n except DHCOptimizerException as e:\n e.data = \"Invalid existing network: \" + e.data\n raise e\n\n flows, obj_val = self.optimize_with_dssp_julia(self.old_network_graph, old_network_obj, set())\n self.logger.info(\"Optimization phase time: %.2fs\" % (time.time() - optimization_start))\n solution_old_graph = self.build_solution_graph(self.old_network_graph, flows)\n\n if self.modify_old_network:\n\n # Add max capacity on old edges\n self.old_capacity = deepcopy(flows)\n old_buildings = list(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).values())\n for key in flows:\n if (key[1],key[0],0) not in self.old_capacity and key[1] not in old_buildings:\n self.old_capacity[(key[1],key[0],0)] = self.old_capacity[key]\n\n # Add Imaginary edges\n for edge in self.old_capacity:\n if self.optimization_graph.has_edge(*edge):\n\n # add nodes\n if not self.optimization_graph.has_node(config.IM_PREFIX+edge[0]):\n self.optimization_graph.add_node(config.IM_PREFIX+edge[0])\n self.optimization_graph.nodes[config.IM_PREFIX+edge[0]][config.GPD_GEO_KEY] = \\\n self.optimization_graph.nodes[edge[0]][config.GPD_GEO_KEY]\n if not self.optimization_graph.has_node(config.IM_PREFIX+edge[1]):\n self.optimization_graph.add_node(config.IM_PREFIX+edge[1])\n self.optimization_graph.nodes[config.IM_PREFIX+edge[1]][config.GPD_GEO_KEY] = \\\n self.optimization_graph.nodes[edge[1]][config.GPD_GEO_KEY]\n # add edges\n if not self.optimization_graph.has_edge(edge[0],config.IM_PREFIX+edge[0]):\n self.optimization_graph.add_edge(edge[0],config.IM_PREFIX+edge[0])\n if not self.optimization_graph.has_edge(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1]):\n self.optimization_graph.add_edge(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1])\n if not self.optimization_graph.has_edge(config.IM_PREFIX+edge[1],edge[1]):\n self.optimization_graph.add_edge(config.IM_PREFIX+edge[1],edge[1])\n\n # put cost\n self.optimization_graph.edges[(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1],0)][config.EDGE_COST_KEY] = \\\n self.optimization_graph.edges[(edge[0],edge[1],0)][config.EDGE_COST_KEY]\n self.optimization_graph.edges[(edge[0],edge[1],0)][config.EDGE_COST_KEY] = 1e-5\n self.optimization_graph.edges[(edge[0],config.IM_PREFIX+edge[0],0)][config.EDGE_COST_KEY] = 1e-5\n self.optimization_graph.edges[(config.IM_PREFIX+edge[1],edge[1],0)][config.EDGE_COST_KEY] = 1e-5\n\n else:\n # if we don't modify the old network, we have to change the capacity of the supplies\n already_consummed = {}\n for edge in solution_old_graph.edges():\n if solution_old_graph.nodes[edge[0]].get(config.NODE_TYPE_KEY) == config.SUPPLY_NODE_TYPE:\n already_consummed[edge[0]] = already_consummed.get(edge[0], 0) + \\\n solution_old_graph.edges[edge][config.SOLUTION_POWER_FLOW_KEY]\n for source in already_consummed:\n if already_consummed[source] <= self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY]:\n self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] -= already_consummed[source]\n self.network_objective -= already_consummed[source]\n else:\n self.network_objective -= self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY]\n 
self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] = 0\n\n # Remove edges from old network\n edges_to_remove = set()\n for e in self.optimization_graph.edges():\n if self.old_network_graph.has_edge(*e) or self.old_network_graph.has_edge(e[1],e[0]):\n edges_to_remove.add(e)\n self.optimization_graph.remove_edges_from(edges_to_remove)\n\n # Remove isolated buildings of optimization graph\n isolated_to_remove = set()\n for e in self.old_network_graph.edges():\n if e[0] in self.old_network_graph.nodes() and \\\n self.optimization_graph.nodes[e[1]].get(config.NODE_TYPE_KEY) == config.BUILDING_NODE_TYPE:\n isolated_to_remove.add(e)\n self.optimization_graph.remove_edges_from(isolated_to_remove)\n\n # Remove buildings from old network\n for n, data in self.old_network_graph.nodes(data=True):\n if data.get(config.NODE_TYPE_KEY) == config.BUILDING_NODE_TYPE:\n self.optimization_graph.remove_node(n)\n\n # Re-link sources\n sources = set()\n for n, data in self.optimization_graph.nodes(data=True):\n if data.get(config.NODE_TYPE_KEY) == config.SUPPLY_NODE_TYPE:\n sources.add(n)\n source_graph = self.optimization_graph.subgraph(sources).copy()\n self.optimization_graph.remove_nodes_from(sources)\n gnx.remove_isolates(self.optimization_graph)\n node_filter = lambda n: self.optimization_graph.nodes.get(n,{}).get(config.NODE_TYPE_KEY) != config.BUILDING_NODE_TYPE\n gnx.spatial_points_merge(self.optimization_graph, source_graph.nodes_to_gdf(), node_filter=node_filter, inplace=True)\n\n # fill missing information\n gnx.fill_edges_missing_geometry_attributes(self.optimization_graph)\n gnx.fill_length_attribute(self.optimization_graph, config.EDGE_LENGTH_KEY, only_missing=True)\n gnx.fill_length_attribute(self.optimization_graph, config.EDGE_COST_KEY, only_missing=True)\n for e in self.optimization_graph.edges(keys=True):\n self.optimization_graph.edges[e][config.LEASTCOST_COEF_KEY] = \\\n self.optimization_graph.edges[e].get(config.LEASTCOST_COEF_KEY,0)\n\n\n\n # 2. Process the DSSP on optimization graph\n self.check_is_ready()\n self.check_infeasibility(self.optimization_graph, self.network_objective)\n\n if self.old_network_graph is not None and self.modify_old_network:\n old_buildings = set(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).keys())\n else:\n old_buildings = set()\n flows, obj_val = self.optimize_with_dssp_julia(self.optimization_graph, self.network_objective, old_buildings,postprocess= (not self.modify_old_network))\n self.logger.info(\"Optimization phase time: %.2fs\" % (time.time() - optimization_start))\n self.solution_graph = self.build_solution_graph(self.optimization_graph, flows, self.connected)\n\n # 3. 
Postprocess for old network graph\n if self.old_network_graph is not None:\n \n if self.modify_old_network:\n # Put the right supply capacity and cost\n for edge in self.old_capacity:\n if self.solution_graph.has_edge(edge[0],edge[1]):\n self.solution_graph.edges[(edge[0],edge[1])][config.EDGE_COST_KEY] = \\\n self.optimization_graph.edges[(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1],0)][config.EDGE_COST_KEY]\n \n # Remove imaginary edges\n imaginary_nodes_to_remove = set()\n nodes_to_relabel = {}\n for edge in self.solution_graph.edges():\n if str(edge[0]).startswith(config.IM_PREFIX) and str(edge[1]).startswith(config.IM_PREFIX):\n real_edge = edge[0][len(config.IM_PREFIX):],edge[1][len(config.IM_PREFIX):]\n self.old_capacity[(real_edge[0], real_edge[1], 0)] = pd.np.inf\n self.old_capacity[(real_edge[1], real_edge[0], 0)] = pd.np.inf\n if not self.solution_graph.has_edge(*real_edge):\n for i in range(2):\n nodes_to_relabel[edge[i]] = real_edge[i]\n else:\n self.solution_graph.edges[real_edge[0],real_edge[1]][config.SOLUTION_POWER_FLOW_KEY] += \\\n self.solution_graph.edges[edge].get(config.SOLUTION_POWER_FLOW_KEY,0)\n imaginary_nodes_to_remove.add(edge[0])\n imaginary_nodes_to_remove.add(edge[1])\n elif str(edge[0]).startswith(config.IM_PREFIX):\n imaginary_nodes_to_remove.add(edge[0])\n elif str(edge[1]).startswith(config.IM_PREFIX):\n imaginary_nodes_to_remove.add(edge[1])\n\n nx.relabel_nodes(self.solution_graph, nodes_to_relabel, copy=False)\n self.solution_graph.remove_nodes_from(list(imaginary_nodes_to_remove))\n for node in nodes_to_relabel.values():\n if self.solution_graph.has_edge(node, node):\n self.solution_graph.remove_edge(node, node)\n\n else:\n for source in nx.get_node_attributes(self.solution_graph, config.SUPPLY_POWER_CAPACITY_KEY):\n self.solution_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] += already_consummed.get(source,0)\n self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] += already_consummed.get(source,0)\n\n return flows, obj_val", "def __solve_alternative_linear_problem(self, user):\n result = [0] * self.layout_slots\n de_rand_approach = \"greedy\"\n bins_per_category = []\n bins_cardinality = []\n for _ in range(len(self.categories)):\n bins_per_category.append([])\n bins_cardinality.append([])\n\n for cat in range(len(self.categories)):\n for _ in range(len(self.news_row_pivots) + 1):\n bins_per_category[cat].append([])\n bins_cardinality[cat].append([])\n for _ in range(len(self.news_column_pivots) + 1):\n bins_per_category[cat][-1].append([])\n bins_cardinality[cat][-1].append(0)\n\n for news in self.news_pool:\n category_index = self.categories.index(news.news_category)\n x, y = self.__compute_position_in_learning_matrix(user=user, news=news)\n bins_per_category[category_index][x][y].append(news)\n bins_cardinality[category_index][x][y] += 1\n\n index = 0\n bin_samples = []\n for cat in range(len(self.categories)):\n for x in range(len(self.news_row_pivots) + 1):\n for y in range(len(self.news_column_pivots) + 1):\n if (y == 0) and (x != 0):\n continue\n self.alt_B[index] = min(bins_cardinality[cat][x][y], self.layout_slots)\n index += 1\n try:\n selected_news = np.random.choice(bins_per_category[cat][x][y])\n self.sample_quality(selected_news, user, interest_decay=True)\n bin_samples += [selected_news.sampled_quality] * self.layout_slots\n except ValueError:\n bin_samples += [0] * self.layout_slots\n\n self.alt_C = np.array(list(np.array(self.alt_lambdas) * bin_samples)) * -1\n linear_problem = 
opt.linprog(A_ub=self.alt_A, b_ub=self.alt_B, c=self.alt_C)\n\n # FOR EACH SLOT, ISOLATES THE CORRESPONDING VARIABLES\n slots_assegnation_probabilities = []\n slot_counter = 0\n tmp_slot_probabilities = []\n while slot_counter < self.layout_slots:\n i = slot_counter\n while i < len(linear_problem.x):\n tmp_slot_probabilities.append(np.abs(linear_problem.x[i]))\n i += self.layout_slots\n slots_assegnation_probabilities.append(tmp_slot_probabilities.copy())\n tmp_slot_probabilities.clear()\n slot_counter += 1\n\n slot_promenances = self.real_slot_promenances.copy()\n slot_promenances_norm = np.array(slot_promenances) / sum(slot_promenances)\n slots_nr = [s for s in range(0, self.layout_slots)]\n for i in range(self.layout_slots):\n if de_rand_approach == \"ordered\":\n k = i\n elif (de_rand_approach == \"greedy\") or (de_rand_approach == \"greedy_max\"):\n k = np.argmax(slot_promenances)\n slot_promenances[k] = 0\n elif de_rand_approach == \"randomized\":\n k = np.random.choice(slots_nr, p=slot_promenances_norm)\n slot_promenances[k] = 0\n else:\n raise RuntimeError(\"De_randomization approach not recognized. Try either 'ordered', 'greedy', \"\n \"'randomized' or 'greedy_max'.\")\n\n target_slot_probabilities = [x for x in slots_assegnation_probabilities[k]]\n target_slot_probabilities_norm = np.array(target_slot_probabilities) / sum(target_slot_probabilities)\n if de_rand_approach == \"greedy_max\":\n assigning_bin_index = np.argmax(target_slot_probabilities)\n cat_index = int(assigning_bin_index / self.num_of_bins)\n x = self.bins_for_position[int(assigning_bin_index)][0]\n y = self.bins_for_position[int(assigning_bin_index)][1]\n\n else:\n assigning_bin = np.random.choice([x for x in range(len(slots_assegnation_probabilities[k]))], p=target_slot_probabilities_norm)\n cat_index = int(assigning_bin / self.num_of_bins)\n x = self.bins_for_position[int(assigning_bin)][0]\n y = self.bins_for_position[int(assigning_bin)][1]\n\n result[k] = np.random.choice(bins_per_category[cat_index][x][y])\n\n return result", "def incompatibility_solve_cg(self, useAMS=True):\n \n zero = Expression((\"0.0\", \"0.0\", \"0.0\"), degree=1)\n bc = DirichletBC(self.PN, zero, DirichletBoundary())\n \n T1 = Function(self.PN) # Solution for the curl curl problem\n T2 = Function(self.PN) # Solution for the curl curl problem\n T3 = Function(self.PN) # Solution for the curl curl problem\n\n if useAMS:\n \n # Set operator for the linear solver\n L_X = inner(self.strain_diff_1, curl(self.inc_v0))*dx\n A_X, b_X = assemble_system(self.a_X, L_X, bc)\n self.ksp_X.setOperators(as_backend_type(A_X).mat())\n self.ksp_X.solve(as_backend_type(b_X).vec(), as_backend_type(T1.vector()).vec())\n\n # Show linear solver details\n self.ksp_X.view()\n\n # Solve 2nd system\n L_X = inner(self.strain_diff_2, curl(self.inc_v0))*dx\n A_X, b_X = assemble_system(self.a_X, L_X, bc)\n self.ksp_X.setOperators(as_backend_type(A_X).mat())\n self.ksp_X.solve(as_backend_type(b_X).vec(), as_backend_type(T2.vector()).vec())\n\n # Solve 3nd system\n L_X = inner(self.strain_diff_3, curl(self.inc_v0))*dx\n A_X, b_X= assemble_system(self.a_X, L_X, bc)\n self.ksp_X.setOperators(as_backend_type(A_X).mat())\n self.ksp_X.solve(as_backend_type(b_X).vec(), as_backend_type(T3.vector()).vec())\n \n else:\n\n ### vanilla CG works with potential as RHS\n\n L_X = inner(self.strain_diff_1, curl(self.inc_v0))*dx\n solve(self.a_X == L_X, T1, bc, \n solver_parameters={'linear_solver': 'cg', 'preconditioner': 'jacobi'}) \n\n L_X = inner(self.strain_diff_2, 
curl(self.inc_v0))*dx\n solve(self.a_X == L_X, T2, bc, \n solver_parameters={'linear_solver': 'cg', 'preconditioner': 'jacobi'}) \n\n L_X = inner(self.strain_diff_3, curl(self.inc_v0))*dx\n solve(self.a_X == L_X, T3, bc, \n solver_parameters={'linear_solver': 'cg', 'preconditioner': 'jacobi'})\n\n return project( self.X_0(curl(T1),curl(T2),curl(T3)), \n self.TFS, solver_type=\"cg\", preconditioner_type=\"ilu\")", "def g_solving_subproblem_of_LR(self,vehicle_id):\r\n global_LB=-10000\r\n global_UB=10000\r\n iteration_for_RSP=20\r\n optimal_solution_for_RSP=None\r\n optimal_value_y=0\r\n self.multiplier_v=0.5\r\n\r\n #solve the expected shortest path problem\r\n self.g_dynamic_programming_algorithm(vehicle_id, 4)\r\n #obtain the variance\r\n y_=self.g_ending_state_vector[vehicle_id].VSStateVector[0].Primal_Label_cost_variance\r\n\r\n for k in range(iteration_for_RSP):\r\n # print(k)\r\n LB=0\r\n # step 2: solve decomposed dual problems\r\n # Part I: subproblem of x\r\n self.g_dynamic_programming_algorithm(vehicle_id, 2)\r\n LB+=self.g_ending_state_vector[vehicle_id].VSStateVector[0].Label_cost_for_lagrangian\r\n\r\n # Part II: subproblem of y\r\n obj_of_y_ = self.reliability * (y_) ** 0.5 - self.multiplier_v * y_\r\n if obj_of_y_ > 0:\r\n y = 0\r\n LB += 0\r\n else:\r\n y = y_\r\n LB += obj_of_y_\r\n # generate an upper bound\r\n variance = self.g_ending_state_vector[vehicle_id].VSStateVector[0].Primal_Label_cost_variance\r\n Label_cost_for_lagrangian_mean=self.g_ending_state_vector[vehicle_id].VSStateVector[0].Label_cost_for_lagrangian_mean\r\n UB=Label_cost_for_lagrangian_mean+self.reliability*(variance)**0.5\r\n\r\n # print(\"UB:{}\".format(UB))\r\n # print(\"LB:{}\".format(LB))\r\n\r\n # UB and LB update\r\n if LB > global_LB:\r\n global_LB = LB\r\n optimal_solution_for_RSP = self.g_ending_state_vector[vehicle_id].VSStateVector\r\n optimal_value_y = y\r\n\r\n if UB < global_UB:\r\n global_UB = UB\r\n\r\n\r\n # step 3: update multipliers\r\n if variance-y!= 0:\r\n self.multiplier_v+= (global_UB - LB) / (variance-y)\r\n # if self.multiplier_v<0:\r\n # self.multiplier_v=1\r\n # print(self.multiplier_v)\r\n\r\n # step 4: termination condition test\r\n if global_UB != 0:\r\n gap = abs((global_UB-global_LB) / global_UB)\r\n # print(gap)\r\n if gap < 0.02:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP, optimal_value_y,global_LB,global_UB\r\n else:\r\n if global_UB - global_LB == 0:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP,optimal_value_y,global_LB,global_UB\r\n\r\n if k == iteration_for_RSP - 1:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP,optimal_value_y,global_LB,global_UB", "def solve():\n\n s, g, e = make_lattice(21)\n stack = deque([[e]])\n vals = {s: 1}\n max_n = 0\n\n while stack:\n max_n = max(max_n, len(stack))\n n, *p = stack.pop()\n for c in g.get_connected(n):\n if c > n:\n continue\n if c in vals:\n propagate(c, [n] + p, vals)\n else:\n stack.append([c, n] + p)\n return vals[e]", "def __solve_linear_problem(self, continuity_relaxation=True):\n result = [0] * self.layout_slots\n self.news_pool.sort(key=lambda x: (x.news_category, x.sampled_quality), reverse=True)\n LP_news_pool = []\n done_for_category = False\n category_count = 0\n prev_category = self.news_pool[0].news_category\n # First build a subset 
of news to easily handle the LP resolution\n for news in self.news_pool:\n if prev_category != news.news_category:\n if category_count < self.layout_slots:\n raise RuntimeWarning(\"Not enough news per category found. There should be at least \" +\n str(self.layout_slots) + \" news with category = \" + prev_category + \", but \"\n \"only \" + str(category_count) + \"are present. The allocation maybe \"\n \"sub-optimal.\")\n category_count = 0\n done_for_category = False\n prev_category = news.news_category\n if not done_for_category:\n LP_news_pool.append(news)\n category_count += 1\n if category_count == self.layout_slots:\n done_for_category = True\n\n # If not all the required news are present, add some other news at random.\n while len(LP_news_pool) < len(self.categories) * self.layout_slots:\n random_news = np.random.choice(self.news_pool)\n if random_news not in LP_news_pool:\n LP_news_pool.append(random_news)\n\n LP_news_pool.sort(key=lambda x: x.news_category, reverse=False)\n thetas = []\n # Compute the vector of coefficients for the LP objective function\n for news in LP_news_pool:\n thetas += [news.sampled_quality] * self.layout_slots\n self.C = list(np.array(thetas) * np.array(self.lambdas))\n\n # Then solve an LP or an ILP\n if continuity_relaxation:\n linear_problem = opt.linprog(A_ub=self.A, b_ub=self.B, c=self.C)\n slots_assegnation_probabilities = []\n slot_counter = 0\n tmp_slot_probabilities = []\n while slot_counter < self.layout_slots:\n i = slot_counter\n while i < len(linear_problem.x):\n tmp_slot_probabilities.append(np.abs(linear_problem.x[i]))\n i += self.layout_slots\n slots_assegnation_probabilities.append(tmp_slot_probabilities.copy())\n tmp_slot_probabilities.clear()\n slot_counter += 1\n\n self.measure_allocation_diversity_bounds_errors(slots_assegnation_probabilities, LP_news_pool, iter=10)\n\n result = self.__de_randomize_LP(LP_news_pool, slots_assegnation_probabilities, self.lp_rand_tech)\n\n else:\n # INITIALIZES AN INTEGER LINEAR PROBLEM\n ILP = LpProblem(\"News_ILP\", LpMaximize)\n ILP_variables = []\n\n for cat in range(len(self.categories)):\n for j in range(self.layout_slots):\n for s in range(self.layout_slots):\n ILP_variables.append(LpVariable(name=str(cat) + \"_\" + str(j) + \"_\" + str(s), lowBound=0, upBound=1, cat=\"Binary\"))\n\n # Objective function addition to the problem\n C = list(np.array(self.C) * -1)\n ILP += lpSum([C[i] * ILP_variables[i] for i in range(len(self.C))])\n\n # Category constraints addition to the problem\n for i in range(len(self.categories)):\n ILP += lpSum([self.A[i][j] * ILP_variables[j] for j in range(len(self.C))]) <= self.B[i]\n\n # Slots capacity constraints addition to the problem\n for i in range(len(self.categories), len(self.categories) + self.layout_slots):\n ILP += lpSum([self.A[i][j] * ILP_variables[j] for j in range(len(self.C))]) <= self.B[i]\n\n # News capacity constraints addition to the problem\n for i in range(len(self.categories) + self.layout_slots, len(self.categories) + self.layout_slots + len(self.categories) * self.layout_slots):\n ILP += lpSum([self.A[i][j] * ILP_variables[j] for j in range(len(self.C))]) <= self.B[i]\n\n ILP.solve()\n\n # FOR EACH SLOT, ISOLATES THE CORRESPONDING VARIABLES\n slots_assegnation_probabilities = []\n slot_counter = 0\n tmp_slot_probabilities = []\n while slot_counter < self.layout_slots:\n i = slot_counter\n while i < len(ILP.variables()):\n tmp_slot_probabilities.append(ILP.variables().__getitem__(i))\n i += self.layout_slots\n 
slots_assegnation_probabilities.append(tmp_slot_probabilities.copy())\n tmp_slot_probabilities.clear()\n slot_counter += 1\n\n # TAKES THE VARIABLES WHICH VALUE IS 1, THEN ALLOCATES THE CORRESPONDING NEWS IN THE RESULT PAGE\n for i in range(len(result)):\n for probabilities in slots_assegnation_probabilities[i]:\n if probabilities.varValue > 0:\n var_name = probabilities.name\n break\n indexes = var_name.split(\"_\")\n category_index = int(indexes[0])\n news_number = int(indexes[1])\n news_index = category_index * self.layout_slots + news_number\n result[i] = LP_news_pool[news_index]\n\n return result", "def solve(self):", "def potentialSolver4(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n 
-self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def solve(self):\n # check for jacobian and set it if present and to be used\n if self.use_sparse:\n if self._use_jac and hasattr(self.problem,'sparse_jac'):\n jac = self.problem.sparse_jac\n else:\n jac = None\n else:\n if self._use_jac and hasattr(self.problem,'jac'):\n jac = self.problem.jac\n else:\n jac = None\n \n # Initialize solver and solve \n \n solved = False\n local_min = False\n\n res = N.zeros(self.x0.__len__())\n while (not solved) and self.reg_count < 2:\n try:\n if self._use_fscale:\n self.solver.KINSOL_init(self.func,self.x0,self.dim,jac,self.constraints,self.use_sparse,self.verbosity,self.norm_of_res,self.reg_param,self.fscale)\n else:\n self.solver.KINSOL_init(self.func,self.x0,self.dim,jac,self.constraints,self.use_sparse,self.verbosity,self.norm_of_res,self.reg_param,None)\n start = time.clock()\n res = self.solver.KINSOL_solve(not self._use_ls)\n stop = time.clock()\n self.exec_time += (stop - start)\n solved = True\n except KINError as error:\n if error.value == 42:\n # Try the heuristic\n if hasattr(self.problem, 'get_heuristic_x0'):\n print \"----------------------------------------------------\"\n print \" Solver stuck with zero step-length.\"\n print \"----------------------------------------------------\"\n print \"The following variables have start value zero\"\n print \"and min set to zero causing the zero step-lenght.\"\n print \"These settings are either set by default or by user.\"\n print \"\"\n\n self.x0 = self.problem.get_heuristic_x0()\n self.reg_count += 1\n \n print \"\"\n print \"This setting (start and min to zero) can often\"\n print \"cause problem when initializing the system. \"\n print \"\"\n print \"To avoid this the above variables have\"\n print \"their start attributes reset to one.\"\n print \"\"\n print \"Trying to solve the system again...\"\n else:\n raise KINSOL_Exception(\"Regularization failed due to constraints, tried getting heuristic initial guess but failed.\")\n \n\n elif (error.value == 2):\n print \"---------------------------------------------------------\"\n print \"\"\n print \" !!! 
WARNING !!!\"\n print \"\"\n print \" KINSOL has returned a result but the algorithm has converged\"\n print \" to a local minima, the initial values are NOT consistant!\"\n print \"\"\n print \"---------------------------------------------------------\"\n solved = True\n local_min = True\n else:\n # Other error, send onward as exception\n self.problem.check_constraints(res)\n raise KINSOL_Exception(error.msg[error.value])\n \n if not solved:\n self.solver.Free_KINSOL()\n raise KINSOL_Exception(\"Algorithm exited solution loop without finding a solution, please contact Assimulo support.\")\n\n if self.check_with_model:\n self.problem.check_constraints(res)\n if not local_min:\n print \"Problem sent to KINSOL solved.\"\n \n return res", "def optimize(o_molsys, computer):\n logger = logging.getLogger(__name__)\n\n # Take care of some initial variable declarations\n step_number = 0 # number of steps taken. Partial. IRC alg uses two step counters\n irc_step_number = None\n total_steps_taken = 0\n H = 0 # hessian in internals\n\n # Try to optimize one structure OR set of IRC points. OptError and all Exceptions caught below.\n try:\n\n # Prepare for multiple IRC computation\n if op.Params.opt_type == \"IRC\":\n irc_step_number = 0\n IRCdata.history = IRCdata.IRCdata()\n IRCdata.history.set_atom_symbols(o_molsys.atom_symbols)\n # Why do we need to have IRCdata.history store its own copy?\n IRCdata.history.set_step_size_and_direction(op.Params.irc_step_size, op.Params.irc_direction)\n logger.debug(\"\\tIRC data object created\\n\")\n\n converged = False\n # o_molsys = make_internal_coords(o_molsys)\n if not o_molsys.intcos_present:\n make_internal_coords(o_molsys)\n logger.debug(\"Molecular systems after make_internal_coords:\")\n logger.debug(str(o_molsys))\n\n # following loop may repeat over multiple algorithms OR over IRC points\n while not converged:\n try:\n # if optimization coordinates are absent, choose them. 
Could be erased after AlgError\n if not o_molsys.intcos_present:\n make_internal_coords(o_molsys)\n logger.debug(\"Molecular systems after make_internal_coords:\")\n logger.debug(str(o_molsys))\n\n logger.info(\"\\tStarting optimization algorithm.\\n\")\n logger.info(str(o_molsys))\n\n # Do special initial step-0 for each IRC point.\n # For IRC point, we form/get the Hessian now.\n\n if op.Params.opt_type == \"IRC\":\n if irc_step_number == 0:\n # Step along lowest eigenvector of mass-weighted Hessian.\n logger.info(\"\\tBeginning IRC from the transition state.\\n\")\n logger.info(\"\\tStepping along lowest Hessian eigenvector.\\n\")\n\n H, gX = get_pes_info(H, computer, o_molsys, step_number, irc_step_number)\n logger.debug(print_mat_string(H, title=\"Transformed Hessian in internals.\"))\n\n # Add the transition state as the first IRC point\n q_0 = o_molsys.q_array()\n x_0 = o_molsys.geom\n f_q = o_molsys.gradient_to_internals(gX, -1.0)\n f_x = np.multiply(-1, gX)\n E = computer.energies[-1]\n\n IRCdata.history.add_irc_point(0, q_0, x_0, f_q, f_x, E)\n irc_step_number += 1\n\n # Lowest eigenvector of mass-weighted Hessian.\n G = o_molsys.Gmat(massWeight=True)\n G_root = symm_mat_root(G)\n H_q_m = np.dot(np.dot(G_root, H), G_root.T)\n vM = lowest_eigenvector_symm_mat(H_q_m)\n logger.info(print_array_string(vM, title=\"Lowest evect of H_q_M\"))\n\n # Un mass-weight vector.\n G_root_inv = symm_mat_inv(G_root, redundant=True)\n v = np.dot(G_root_inv, vM)\n\n if op.Params.irc_direction == \"BACKWARD\":\n v *= -1\n # end if IRCStepNumber == 0\n\n else: # Step along gradient.\n logger.info(\"\\tBeginning search for next IRC point.\\n\")\n logger.info(\"\\tStepping along gradient.\\n\")\n v = IRCdata.history.f_q()\n irc_step_number += 1\n\n IRCfollowing.compute_pivot_and_guess_points(o_molsys, v, op.Params.irc_step_size)\n # end if 'IRC'\n\n for step_number in range(op.Params.alg_geom_maxiter):\n header = f\"{'----------------------------':^74}\"\n header += f\"\\n{'Taking A Step: Step Number %d' % (step_number + 1):^90}\"\n header += f\"\\n{'----------------------------':^90}\"\n logger.info(header)\n total_steps_taken += 1\n\n H, gX = get_pes_info(H, computer, o_molsys, step_number, irc_step_number)\n E = computer.energies[-1]\n\n logger.info(\"%s\", print_geom_grad(o_molsys.geom, gX))\n\n if op.Params.print_lvl >= 4:\n hessian.show(H, o_molsys)\n\n f_q = o_molsys.gradient_to_internals(gX, -1.0)\n o_molsys.apply_external_forces(f_q, H, step_number)\n o_molsys.project_redundancies_and_constraints(f_q, H)\n o_molsys.q_show()\n\n if op.Params.test_B:\n testB.test_b(o_molsys)\n if op.Params.test_derivative_B:\n testB.test_derivative_b(o_molsys)\n\n # Check if forces indicate we are approaching minimum.\n if op.Params.opt_type == \"IRC\" and irc_step_number > 2:\n if IRCdata.history.test_for_irc_minimum(f_q):\n logger.info(\"A minimum has been reached on the IRC. 
Stopping here.\\n\")\n raise IRCendReached()\n\n logger.info(print_array_string(f_q, title=\"Internal forces in au:\"))\n\n history.oHistory.append(o_molsys.geom, E, f_q) # Save initial step info.\n history.oHistory.nuclear_repulsion_energy = computer.trajectory[-1][\"properties\"][\n \"nuclear_repulsion_energy\"\n ]\n\n # Analyze previous step performance; adjust trust radius accordingly.\n # Returns true on first step (no history)\n lastStepOK = history.oHistory.current_step_report()\n\n # If step was bad, take backstep here or raise exception.\n if lastStepOK:\n history.oHistory.consecutiveBacksteps = 0\n else:\n # Don't go backwards until we've gone a few iterations.\n if len(history.oHistory.steps) < 5:\n logger.info(\"\\tNear start of optimization, so ignoring bad step.\\n\")\n elif history.History.consecutiveBacksteps < op.Params.consecutiveBackstepsAllowed:\n history.History.consecutiveBacksteps += 1\n logger.info(\n \"\\tCalling for consecutive backstep number %d.\\n\" % history.History.consecutiveBacksteps\n )\n stepAlgorithms.take_step(o_molsys, E, f_q, H, stepType=\"BACKSTEP\")\n logger.info(\"\\tStructure for next step (au):\\n\")\n o_molsys.show_geom()\n continue\n elif op.Params.dynamic_level == 0: # not using dynamic level, so ignore.\n logger.info(\"\\tNo more backsteps allowed.\" + \"Dynamic level is off.\\n\")\n pass\n else:\n raise AlgError(\"Bad step, and no more backsteps allowed.\")\n\n if op.Params.opt_type == \"IRC\":\n DqGuess = IRCdata.history.q_pivot() - IRCdata.history.q()\n Dq = IRCfollowing.dq_irc(o_molsys, E, f_q, H, op.Params.irc_step_size, DqGuess)\n else: # Displaces and adds step to history.\n Dq = stepAlgorithms.take_step(o_molsys, E, f_q, H, op.Params.step_type, computer)\n\n if op.Params.opt_type == \"IRC\":\n converged = convcheck.conv_check(\n step_number,\n o_molsys,\n Dq,\n f_q,\n computer.energies,\n IRCdata.history,\n )\n logger.info(\"\\tConvergence check returned %s.\" % converged)\n\n if converged:\n q_irc_point = o_molsys.q_array()\n forces_irc_point = o_molsys.gradient_to_internals(gX, -1.0)\n lineDistStep = IRCfollowing.calc_line_dist_step(o_molsys)\n arcDistStep = IRCfollowing.calc_arc_dist_step(o_molsys)\n\n IRCdata.history.add_irc_point(\n irc_step_number,\n q_irc_point,\n o_molsys.geom,\n forces_irc_point,\n np.multiply(-1, gX),\n computer.energies[-1],\n lineDistStep,\n arcDistStep,\n )\n IRCdata.history.progress_report()\n\n else: # not IRC.\n converged = convcheck.conv_check(step_number, o_molsys, Dq, f_q, computer.energies)\n logger.info(\"\\tConvergence check returned %s\" % converged)\n\n if converged: # changed from elif when above if statement active\n logger.info(\"\\tConverged in %d steps!\" % (step_number + 1))\n logger.info(\"\\tFinal energy is %20.13f\" % E)\n logger.info(\"\\tFinal structure (Angstroms): \\n\" + o_molsys.show_geom())\n break # break out of step_number loop\n\n logger.info(\"\\tStructure for next step (au):\\n\" + o_molsys.show_geom())\n\n # Hard quit if too many total steps taken (inc. 
all IRC points and algorithms).\n\n if total_steps_taken == op.Params.geom_maxiter:\n logger.error(\n \"\\tTotal number of steps (%d) exceeds maximum allowed (%d).\\n\"\n % (total_steps_taken, op.Params.geom_maxiter)\n )\n raise OptError(\n \"Maximum number of steps exceeded: {}.\".format(op.Params.geom_maxiter),\n \"OptError\",\n )\n\n else: # Associated with above for loop, executes if break is not reached\n logger.error(\n \"\\tNumber of steps (%d) exceeds maximum for algorithm (%d).\\n\"\n % (step_number + 1, op.Params.alg_geom_maxiter)\n )\n raise AlgError(\"Maximum number of steps exceeded for algorithm\")\n\n # For IRC, save and queue up for the optimization of the next point.\n if op.Params.opt_type == \"IRC\":\n if irc_step_number == op.Params.irc_points:\n logger.info(f\"\\tThe requested {op.Params.irc_points} IRC points have been obtained.\")\n raise IRCendReached()\n else:\n logger.info(\"\\tStarting search for next IRC point.\")\n logger.info(\"\\tClearing old constrained optimization history.\")\n history.oHistory.reset_to_most_recent() # delete old steps\n converged = False\n\n # Catch non-fatal algorithm errors and try modifying internals,\n # changing run-levels, optimization parameters, etc. and start over again.\n except AlgError as AF:\n logger.error(\"\\n\\tCaught AlgError exception\\n\")\n eraseIntcos = False\n\n if AF.linearBends:\n # New linear bends detected; Add them, and continue at current level.\n # from . import bend # import not currently being used according to IDE\n for l in AF.linearBends:\n if l.bend_type == \"LINEAR\": # no need to repeat this code for \"COMPLEMENT\"\n iF = addIntcos.check_fragment(l.atoms, o_molsys)\n F = o_molsys.fragments[iF]\n intcosMisc.remove_old_now_linear_bend(l.atoms, F.intcos)\n F.add_intcos_from_connectivity()\n eraseHistory = True\n elif op.Params.dynamic_level == op.Params.dynamic_level_max:\n logger.critical(\"\\n\\t Current algorithm/dynamic_level is %d.\\n\" % op.Params.dynamic_level)\n logger.critical(\"\\n\\t Alternative approaches are not available or turned on.\\n\")\n raise OptError(\"Maximum dynamic_level reached.\")\n else:\n op.Params.dynamic_level += 1\n logger.warning(\"\\n\\t Increasing dynamic_level algorithm to %d.\\n\" % op.Params.dynamic_level)\n logger.warning(\"\\n\\t Erasing old history, hessian, intcos.\\n\")\n eraseIntcos = True\n eraseHistory = True\n op.Params.updateDynamicLevelParameters(op.Params.dynamic_level)\n\n if eraseIntcos:\n logger.warning(\"\\n\\t Erasing coordinates.\\n\")\n for f in o_molsys.fragments:\n del f.intcos[:]\n\n if eraseHistory:\n logger.warning(\"\\n\\t Erasing history.\\n\")\n step_number = 0\n del H\n H = 0\n del history.oHistory[:] # delete steps in history\n history.oHistory.stepsSinceLastHessian = 0\n history.oHistory.consecutiveBacksteps = 0\n\n # print summary\n logger.info(\"\\tOptimization Finished\\n\" + history.oHistory.summary_string())\n\n if op.Params.opt_type == \"linesearch\":\n logger.info(\"\\tObtaining gradient at the final geometry for line-search optimization\\n\")\n # Calculate gradient to show user\n gX = computer.compute(o_molsys.geom, driver=\"gradient\", return_full=False)\n del gX\n qc_output = prepare_opt_output(o_molsys, computer, error=None)\n\n del H\n del history.oHistory[:]\n o_molsys.clear()\n del op.Params\n return qc_output\n\n # Expect to hit this error. 
not an issue\n except IRCendReached:\n\n logger.info(\"\\t\\tFinal IRC Point\\n%s\", o_molsys)\n logger.info(\"Tabulating rxnpath results.\")\n IRCdata.history.progress_report()\n np.multiply(-1, IRCdata.history.f_x(-1))\n rxnpath = IRCdata.history.rxnpath_dict()\n\n logger.info(rxnpath)\n\n qc_output = prepare_opt_output(o_molsys, computer, rxnpath=rxnpath, error=None)\n\n # delete some stuff\n del H\n del history.oHistory[:]\n o_molsys.clear()\n del op.Params\n return qc_output\n\n # Fatal error. Cannot proceed.\n except OptError as error:\n logger.critical(\"\\tA critical optimization-specific error has occured.\\n\")\n logger.critical(\"\\tResetting all optimization options for potential queued jobs.\\n\")\n logger.exception(\"Error Type: \" + str(type(error)))\n logger.exception(\"Error caught:\" + str(error))\n # Dump histories if possible\n try:\n logging.debug(\"\\tDumping history: Warning last point not converged.\\n\" + history.oHistory.summary_string())\n if op.Params.opt_type == \"IRC\":\n logging.info(\"\\tDumping IRC points completed\")\n IRCdata.history.progress_report()\n del history.oHistory[:]\n except NameError:\n pass\n\n rxnpath = None\n if op.Params.opt_type == \"IRC\":\n rxnpath = IRCdata.history.rxnpath_dict()\n logger.debug(rxnpath)\n\n qc_output = prepare_opt_output(o_molsys, computer, rxnpath=rxnpath, error=error)\n\n del history.oHistory[:]\n o_molsys.clear()\n del op.Params\n del computer\n\n return qc_output\n\n except Exception as error:\n logger.critical(\"\\tA non-optimization-specific error has occurred.\\n\")\n logger.critical(\"\\tResetting all optimization options for potential queued jobs.\\n\")\n logger.exception(\"Error Type: \" + str(type(error)))\n logger.exception(\"Error caught:\" + str(error))\n\n rxnpath = None\n if len(history.oHistory.steps) >= 1:\n rxnpath = None\n if op.Params.opt_type == \"IRC\":\n rxnpath = IRCdata.history.rxnpath_dict()\n logger.debug(rxnpath)\n\n qc_output = prepare_opt_output(o_molsys, computer, rxnpath=rxnpath, error=error)\n\n del history.oHistory[:]\n o_molsys.clear()\n del op.Params\n del computer\n\n return qc_output", "def solver_mll(X, y, C, S, alpha=0.1, max_iter=1000, tol=1e-4, positive=False):\n n_tasks, n_samples, n_features = X.shape\n lasso = Lasso(alpha=alpha, fit_intercept=False, positive=positive)\n lasso_p = Lasso(alpha=alpha / n_tasks, fit_intercept=False,\n positive=True)\n old_theta = C[:, None] * S\n\n for i in range(max_iter):\n W = X * C[None, None, :]\n for k in range(n_tasks):\n lasso.fit(W[k], y[k])\n S[:, k] = lasso.coef_\n Z = S.T[:, None, :] * X\n Z = Z.reshape(n_tasks * n_samples, n_features)\n lasso_p.fit(Z, y.flatten())\n C = lasso_p.coef_\n theta = C[:, None] * S\n dll = abs(theta - old_theta).max()\n dll /= max(abs(theta).max(), abs(old_theta).max(), 1.)\n old_theta = theta.copy()\n\n if dll < tol:\n break\n\n if i == max_iter - 1:\n warnings.warn('Objective did not converge.' +\n ' You might want' +\n ' to increase the number of iterations.' 
+\n ' Fitting data with very small alpha' +\n ' may cause precision problems.',\n ConvergenceWarning)\n return C, S, i", "def potentialSolver5(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = 
np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def potentialSolver3(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] 
+ self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def potentialSolver3(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + 
self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def sweptBlock(solver):\n #Create and fill shared array\n createCPUSharedArray(solver,numpy.zeros(solver.sharedShape,dtype=solver.dtype).nbytes)\n for i in range(solver.intermediate):\n solver.sharedArray[i,:,:,:] = solver.initialConditions[solver.globalBlock]\n #Create phase objects\n solver.Up = geometry.Geometry() \n solver.Down = geometry.Geometry() \n solver.Xb = geometry.Geometry()\n solver.Yb = geometry.Geometry()\n solver.Oct = geometry.Geometry() \n\n if solver.gpuBool:\n # Creating cuda device and context\n cuda.init()\n cuda_device = cuda.Device(solver.gpuRank)\n solver.cuda_context = cuda_device.make_context()\n setupGPUSwept(solver)\n #Setting up CPU\n setupCPUSwept(solver)\n solver.comm.Barrier() #Ensure all processes are", "def nodal2D_steady_fixed_source(Dims,Lengths,BCs,D,Sigma,Q, tolerance=1.0e-12, phi_solution=0., LOUD=False, maxits=100):\n I = Dims[0]\n J = Dims[1]\n K = Dims[2]\n L = I*J*K\n Nx = Lengths[0]\n Ny = Lengths[1]\n Nz = Lengths[2]\n \n hx,hy,hz = np.array(Lengths)/np.array(Dims)\n ihx2,ihy2,ihz2 = (1.0/hx**2,1.0/hy**2,1.0/hz**2)\n\n if (type(phi_solution) != np.ndarray):\n phi_solution = np.zeros((2,I,J,5))\n phi_new = phi_solution.copy()\n iteration = 1\n converged = 0\n localBCs = np.ones((2,3))\n\n #reshape Q if necessary\n if Q.shape != (I,J,K,5):\n Q_new = np.zeros((I,J,K,5))\n Q_new[:,:,:,0] = Q[:,:,:]\n Q = Q_new\n\n #iterate over the x directions\n k=0\n while not(converged):\n \n #Solve for x direction\n d = 0 #solv direction\n tr_id = 1 #trans direction idx in array\n for j in range(J): #spatial loop over J coordinates\n for i in range(I): #spatial loop over X coordinates\n\n if not(i==0):\n phi_left = phi_solution[d,i-1,j,:]\n C = positive_current(phi_left,hx/2,hx,D[i-1,j,k])\n #print(\"i =\",i,\"Cr =\",C)\n localBCs[0,0:3] = [0.25,-D[i,j,k]/2,C]\n else:\n localBCs[0,:] = BCs[0,:].copy()\n localBCs[0,1] *= D[i,j,k]\n if not(i==(I-1)):\n phi_rt = phi_solution[d,i+1,j,:]\n C = negative_current(phi_rt,-hx/2,hx,D[i+1,j,k])\n #print(\"i =\",i,\"Cr =\",C)\n localBCs[1,0:3] = [.25,D[i,j,k]/2,C]\n else:\n localBCs[1,:] = BCs[1,:].copy()\n localBCs[1,1] *= D[i,j,k]\n \n #Compute transverse fluxes\n if i==0:\n nbr_ids = [i,i,i+1] #Assume constant along left edge\n elif i==(I-1):\n nbr_ids = [i-1,i,i] #assume constant along right edge\n else:\n nbr_ids = [i-1,i,i+1] #interior cell\n\n if not j==(J-1):\n top_phis = phi_solution[tr_id,nbr_ids,j,:]\n top_Ds = D[nbr_ids,j,k]\n Ltop_quad = transverse_leakage_dof(top_phis,hy/2.,hy,hx,top_Ds)\n else:\n top_phis = phi_solution[tr_id,nbr_ids,j,:]\n top_Ds = D[nbr_ids,j,k]\n Ltop_quad = transverse_leakage_dof(top_phis,hy/2.,hy,hx,top_Ds)\n #Ltop_quad = (0., 0, 0)\n\n if not j==0:\n bot_phis = phi_solution[tr_id,nbr_ids,j,:]\n bot_Ds = D[nbr_ids,j,k]\n Lbot_quad = transverse_leakage_dof(bot_phis,-hy/2.,hy,hx,bot_Ds)\n else:\n bot_phis = phi_solution[tr_id,nbr_ids,j,:]\n bot_Ds = D[nbr_ids,j,k]\n Lbot_quad = transverse_leakage_dof(bot_phis,-hy/2.,hy,hx,bot_Ds)\n #Lbot_quad = (0.,0,0)\n\n #Add leakages to the Q_local terms\n# print(\"\\n X Information for element: \",i,j)\n# print(\"\\nThe source 
is: \",Q[i,j,k,0])\n\n Q_local = np.array(Q[i,j,k,:])\n for dof in range(len(Ltop_quad)):\n Q_local[dof] -= 1/hy*(Ltop_quad[dof] - Lbot_quad[dof])\n\n# print(\"The transverse leakage magnitude is: \",-1./hy*(Ltop_quad[0] - Lbot_quad[0]))\n# print(\"Total RHS: \", Q_local[0], Q_local[1])\n\n #Compute the new x fluxes\n phi_new[0,i,j,:] = single_node1GVacuum(D[i,j,k],Sigma[i,j,k],Q_local,hx,localBCs)\n phi,a1,a2,a3,a4 = phi_new[0,i,j,:]\n# print(\"The reaction magnitude: \", phi_new[0,i,j,0]*Sigma[i,j,k])\n# print(\"The current magnitude: \",1./hx*(current(phi_new[0,i,j,:],hx/2,hx,D[i,j,k]) - current(phi_new[0,i,j,:],-hx/2,hx,D[i,j,k])))\n# print(\"\")\n\n #print(i,\"incoming current on left =\", localBCs[0,2],positive_current(phi_new[i,:],-h/2,h,D[i]) )\n if 0*(i>0):\n print(i,\"outgoing current on left =\", negative_current(phi_new[0,i-1,j,:],hx/2,hx,D[i-1,j,k]),\n negative_current(phi_new[0,i,j,:],-hx/2,hx,D[i,j,k]) )\n if 0*(i<I-1):\n print(i,\"outgoing current on right =\", positive_current(phi_new[0,i+1,j,:],-hx/2,hx,D[i+1,j,k]),\n positive_current(phi_new[0,i,j,:],hx/2,hx,D[i,j,k]) )\n #print(i,\"incoming current on right =\", localBCs[1,2],negative_current(phi_new[i,:],h/2,h,D[i]) )\n #print(\"zone \",i,\" current in at right:\",localBCs[1,2],\" current out at right:\",current_left)\n\n \n #Solve for y direction\n d = 1 #solv direction\n tr_id = 0 #trans direction idx in array\n for j in range(J): #spatial loop over J coordinates\n for i in range(I): #spatial loop over X coordinates\n\n if not(j==0):\n phi_left = phi_solution[d,i,j-1,:]\n C = positive_current(phi_left,hy/2,hy,D[i,j-1,k])\n #print(\"i =\",i,\"Cr =\",C)\n localBCs[0,0:3] = [0.25,-D[i,j,k]/2,C]\n else:\n localBCs[0,:] = BCs[2,:].copy()\n localBCs[0,1] *= D[i,j,k]\n if not(j==(J-1)):\n phi_rt = phi_solution[d,i,j+1,:]\n C = negative_current(phi_rt,-hy/2,hy,D[i,j+1,k])\n #print(\"i =\",i,\"Cr =\",C)\n localBCs[1,0:3] = [.25,D[i,j,k]/2,C]\n else:\n localBCs[1,:] = BCs[3,:].copy()\n localBCs[1,1] *= D[i,j,k]\n \n #Compute transverse fluxes\n if j==0:\n nbr_ids = [j,j,j+1] #Assume constant along left edge\n elif j==(J-1):\n nbr_ids = [j-1,j,j] #assume constant along right edge\n else:\n nbr_ids = [j-1,j,j+1] #interior cell\n\n if not i==(I-1):\n rgt_phis = phi_solution[tr_id,i,nbr_ids,:]\n rgt_Ds = D[i,nbr_ids,k]\n Lrgt_quad = transverse_leakage_dof(rgt_phis,hx/2.,hx,hy,rgt_Ds)\n# print(\"Leakage right\",Lrgt_quad)\n# print(\"Just the right leakage\",current(phi_solution[0,i,j,:],hx/2.,hx,D[i,j,k]))\n# print(\"Right outflow, inflow\",positive_current(phi_solution[0,i,j,:],hx/2,hx,D[i,j,k]),\n# negative_current(phi_solution[0,i,j,:],hx/2,hx,D[i,j,k]))\n else:\n rgt_phis = phi_solution[tr_id,i,nbr_ids,:]\n rgt_Ds = D[i,nbr_ids,k]\n Lrgt_quad = transverse_leakage_dof(rgt_phis,hx/2.,hx,hy,rgt_Ds)\n# print(\"Leakage right\",Lrgt_quad)\n# print(\"Just the right leakage\",current(phi_solution[0,i,j,:],hx/2.,hx,D[i,j,k]))\n# print(\"Right outflow, inflow\",positive_current(phi_solution[0,i,j,:],hx/2,hx,D[i,j,k]),\n# negative_current(phi_solution[0,i,j,:],hx/2,hx,D[i,j,k]))\n\n if not i==0:\n lft_phis = phi_solution[tr_id,i,nbr_ids,:]\n lft_Ds = D[i,nbr_ids,k]\n Llft_quad = transverse_leakage_dof(lft_phis,-hx/2.,hx,hy,lft_Ds)\n else:\n lft_phis = phi_solution[tr_id,i,nbr_ids,:]\n lft_Ds = D[i,nbr_ids,k]\n Llft_quad = transverse_leakage_dof(lft_phis,-hx/2.,hx,hy,lft_Ds)\n #Llft_quad = (0.,0,0)\n\n #Add leakages to the Q_local terms\n Q_local = np.array(Q[i,j,k,:])\n# print(\"\\n Y Information for element: \",i,j)\n# print(\"\\nThe 
source is: \",Q[i,j,k,0])\n for dof in range(len(Lrgt_quad)):\n Q_local[dof] -= 1/hx*(Lrgt_quad[dof] - Llft_quad[dof])\n# print(\"The transverse leakage magnitude is: \",-1./hx*(Lrgt_quad[0] - Llft_quad[0]))\n# print(\"Total RHS: \", Q_local[0], Q_local[1])\n\n phi_new[1,i,j,:] = single_node1GVacuum(D[i,j,k],Sigma[i,j,k],Q_local,hy,localBCs)\n# print(\"The reaction magnitude: \", phi_new[1,i,j,0]*Sigma[i,j,k])\n# print(\"The current magnitude: \",1./hy*(current(phi_new[1,i,j,:],hy/2,hy,D[i,j,k]) - current(phi_new[1,i,j,:],-hy/2,hy,D[i,j,k])))\n# print(\"\")\n phi,a1,a2,a3,a4 = phi_new[1,i,j,:]\n #print(i,\"incoming current on left =\", localBCs[0,2],positive_current(phi_new[i,:],-h/2,h,D[i]) )\n if 0*(i>0):\n print(i,\"outgoing current on left =\", negative_current(phi_new[i-1,:],h/2,h,D[i]),negative_current(phi_new[i,:],-h/2,h,D[i]) )\n if 0*(i<I-1):\n print(i,\"outgoing current on right =\", positive_current(phi_new[i+1,:],-h/2,h,D[i]),positive_current(phi_new[i,:],h/2,h,D[i]) )\n #print(i,\"incoming current on right =\", localBCs[1,2],negative_current(phi_new[i,:],h/2,h,D[i]) )\n #print(\"zone \",i,\" current in at right:\",localBCs[1,2],\" current out at right:\",current_left)\n\n# print(\"X solution\", phi_new[0,:,:,0])\n# print(\"Y solution\", phi_new[1,:,:,0])\n\n #Compute total change in x and y\n relchange = np.linalg.norm( np.reshape(phi_new-phi_solution, 5*I*J*K*2))/np.linalg.norm( np.reshape(phi_new, 5*I*J*K*2))\n reldiff = np.linalg.norm( np.reshape(phi_new[0,:,:,0] - phi_new[1,:,:,0], I*J*K)/np.linalg.norm( np.reshape(phi_new[0,:,:,0],I*J*K)) )\n converged = (relchange < tolerance) or (iteration >= maxits)\n if (LOUD):\n print(\"Iteration\",iteration,\": relative change total =\",relchange,\"relative difference X Y\",reldiff)\n iteration += 1 \n phi_solution = phi_new.copy()\n\n\n x = np.linspace(hx*.5,Nx-hx*.5,I)\n y = np.linspace(hy*.5,Ny-hy*.5,J)\n z = np.linspace(hz*.5,Nz-hz*.5,K)\n return x,y,z,phi_solution[0,:,:,0].reshape(I,J,1)#+phi_solution[1,:,:,0].reshape(I,J,1)))", "def solve_driv(v, ene, s, n, h):\n\n xs = np.array([(k+1)*h for k in range(n)])\n h2 = h*h\n k = np.sqrt(2.0*ene)\n \n vs = [v(x)-ene for x in xs]\n\n mat = laplacian_mat(n) -2.0 * h2 * scipy.sparse.diags(vs, 0) + bc_outgoing_mat(n, h, k)\n vec = np.array([-2.0*h*h*s(x) for x in xs])\n\n ys = scipy.sparse.linalg.spsolve(mat, vec)\n return (xs, ys)", "def solve(self):\n dim = self.puzzle.dimension\n\n # initial loop\n for value, (row, col) in self.puzzle:\n if value:\n self.clear_row(row, value)\n self.clear_col(col, value)\n self.clear_subgrid(row, col, value)\n self.updates.add((value, (row, col)))\n for ps in self.possibilities:\n ps.discard((row, col))\n\n while self.updates:\n while self.updates:\n # while self.updates:\n value, (row, col) = self.updates.pop()\n for i in range(1, dim + 1):\n self.check_row(i, value)\n self.check_col(i, value)\n for i in range(2, 8, 3):\n self.check_subgrid(row, i, value)\n self.check_subgrid(i, col, value)\n\n for value, (row, col) in self.puzzle:\n if not value:\n self.check_cell(row, col)\n\n # for value in range(1, dim + 1):\n # for row in [2, 5, 8]:\n # for col in [2, 5, 8]:\n # self.check_subgrid(row, col, value)", "def solve(self, solver):\n solver.solve()", "def actualSolve(self, lp):\n\t\traise NotImplementedError", "def solve(self):\n # Use a trivial tour (1-2-3-...-N-1) to set the global upper bound.\n tour = list(range(self._N))\n upper_bound = sum([self._G[i][(i + 1) % self._N] for i in range(self._N)])\n trace = []\n\n # Start from a configuration with 
a single vertex.\n frontier = [BranchAndBoundConfiguration(self._G, self._N, [0], LOWER_BOUND_METHOD)]\n\n # Set the start time.\n start_time = time.time()\n\n # Branch and bound until the frontier set is empty or the time has expired.\n while frontier and (time.time() - start_time) < self._cutoff_time:\n # Fetch the most promising configuration.\n config = heappop(frontier)\n\n # Expand configuration by appending a vertex to the path.\n for v in range(self._N):\n try:\n expanded_config = config.expand(v)\n except ValueError:\n # Expanded configuration is not valid.\n continue\n if expanded_config.is_solution():\n # Update the global upper bound, if needed.\n this_solution = expanded_config.get_cycle_cost()\n if this_solution < upper_bound:\n # Log it.\n trace.append((time.time() - start_time, this_solution))\n # Update the best solution.\n upper_bound = this_solution\n tour = list(expanded_config.get_path())\n elif expanded_config.get_lower_bound() < upper_bound:\n # Add to the frontier set.\n heappush(frontier, expanded_config)\n return (upper_bound, [self._index_to_id[v] for v in tour], trace)", "def solve(self):\n ...", "def solver(graph,homes,source,home_clusters,all_pairs_distances,all_pairs_shortest_paths):\n\n car_path = [get_car_path(graph,home_clusters,source,all_pairs_distances,all_pairs_shortest_paths, \n source_in_clusters = B1, christofides = B2) for B1 in [False,True] for B2 in [False,True]]\n\n dropoffs = [cluster_solver_utils.nearest_dropoff_efficient(graph,path,homes,all_pairs_distances) for path in car_path]\n cost = [cluster_solver_utils.eval_cost_efficient(graph,car_path[i],dropoffs[i],all_pairs_distances) for i in range(len(car_path))]\n\n minimum_cost = min(cost)\n idx = cost.index(minimum_cost)\n\n return minimum_cost, dropoffs[idx], car_path[idx]", "def g_solving_subproblem_of_ALR(self,vehicle_id):\r\n global_LB = -10000\r\n global_UB = 10000\r\n iteration_for_RSP = 20\r\n optimal_solution_for_RSP = None\r\n self.multiplier_v = 0.5\r\n\r\n # solve the expected shortest path problem\r\n self.g_dynamic_programming_algorithm(vehicle_id, 3)\r\n\r\n # obtain the variance\r\n y_ =self.g_ending_state_vector[vehicle_id].VSStateVector[0].Primal_Label_cost_variance\r\n\r\n for k in range(iteration_for_RSP):\r\n # print(k)\r\n LB = 0\r\n # step 2: solve decomposed dual problems\r\n # Part I: subproblem of x\r\n self.g_dynamic_programming_algorithm(vehicle_id, 1)\r\n LB += self.g_ending_state_vector[vehicle_id].VSStateVector[0].Label_cost_for_searching\r\n\r\n # Part II: subproblem of y\r\n obj_of_y_ = self.reliability * (y_) ** 0.5 - self.multiplier_v * y_\r\n if obj_of_y_ > 0:\r\n y = 0\r\n LB += 0\r\n else:\r\n y = y_\r\n LB += obj_of_y_\r\n\r\n # generate an upper bound\r\n variance = self.g_ending_state_vector[vehicle_id].VSStateVector[0].Primal_Label_cost_variance\r\n Label_cost_for_lagrangian_mean = self.g_ending_state_vector[vehicle_id].VSStateVector[0].Label_cost_for_searching_mean\r\n UB = Label_cost_for_lagrangian_mean + self.reliability * (variance) ** 0.5\r\n\r\n # print(\"UB:{}\".format(UB))\r\n # print(\"LB:{}\".format(LB))\r\n\r\n # UB and LB update\r\n if LB > global_LB:\r\n global_LB = LB\r\n\r\n if UB < global_UB:\r\n global_UB = UB\r\n optimal_solution_for_RSP = self.g_ending_state_vector[vehicle_id].VSStateVector[0]\r\n\r\n # step 3: update multipliers\r\n if variance- y != 0:\r\n self.multiplier_v+= (global_UB - LB) / (variance-y)\r\n # if self.multiplier_v<0:\r\n # self.multiplier_v=1\r\n # print(self.multiplier_v)\r\n\r\n # step 4: termination condition 
test\r\n if global_UB != 0:\r\n gap = abs((global_UB - global_LB) / global_UB)\r\n # print(gap)\r\n if gap < 0.02:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP, global_LB\r\n else:\r\n if global_UB - global_LB == 0:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP, global_LB\r\n\r\n if k == iteration_for_RSP - 1:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP, global_LB", "def refl_analysis(self,dials_model):\n Z = self.refl_table\n indices = Z['miller_index']\n expts = ExperimentListFactory.from_json_file(dials_model,\n check_format=False)\n self.dials_model=expts[0]\n CRYS = self.dials_model.crystal\n UC = CRYS.get_unit_cell()\n strong_resolutions = UC.d(indices)\n order = flex.sort_permutation(strong_resolutions, reverse=True)\n Z[\"spots_order\"] = order\n self.spots_pixels = flex.size_t()\n spots_offset = flex.int(len(order),-1)\n spots_size = flex.int(len(order),-1)\n\n P = panels = Z['panel']\n S = shoeboxes = Z['shoebox']\n N_visited = 0; N_bad = 0\n for oidx in range(len(order)): #loop through the shoeboxes in correct order\n sidx = order[oidx] # index into the Miller indices\n ipanel = P[sidx]\n slow_size = 254\n fast_size = 254\n panel_size=slow_size*fast_size\n bbox = S[sidx].bbox\n first_position = spots_offset[sidx] = self.spots_pixels.size()\n for islow in range(max(0,bbox[2]-3), min(slow_size,bbox[3]+3)):\n for ifast in range(max(0,bbox[0]-3), min(fast_size,bbox[1]+3)):\n value = self.trusted_mask[ipanel][islow*slow_size + ifast]\n N_visited += 1\n if value: self.spots_pixels.append(ipanel*panel_size+islow*slow_size+ifast)\n else: N_bad+=1\n spot_size = spots_size[sidx] = self.spots_pixels.size() - first_position\n Z[\"spots_offset\"] = spots_offset\n Z[\"spots_size\"] = spots_size\n print (N_visited,\"pixels were visited in the %d shoeboxes (with borders)\"%len(order))\n print (N_bad,\"of these were bad pixels, leaving %d in target\"%(len(self.spots_pixels)))", "def solve_polyphase_instance(\n allele_matrix, genotype_list, param, timers, partial_phasing=None, quiet=False\n):\n num_vars = len(allele_matrix.getPositions())\n\n # Precompute block borders based on read coverage and linkage between variants\n if not quiet:\n logger.info(\"Detecting connected components with weak interconnect ..\")\n timers.start(\"detecting_blocks\")\n\n ploidy = param.ploidy\n sl = param.block_cut_sensitivity <= 1\n block_starts = compute_block_starts(allele_matrix, ploidy, single_linkage=sl)\n\n # Set block borders and split readset\n block_starts.append(num_vars)\n num_blocks = sum(1 for i, j in zip(block_starts[:-1], block_starts[1:]) if j > i + 1)\n if not quiet:\n logger.info(\n f\"Split heterozygous variants into {num_blocks} blocks (and {len(block_starts) - num_blocks - 1} singleton blocks).\"\n )\n\n # Process blocks independently\n results = []\n processed_blocks = 0\n timers.stop(\"detecting_blocks\")\n\n \"\"\"\n Python's multiprocessing makes hard copies of the passed arguments, which is not trivial for\n cython objects, especially when they contain pointers to other cython objects. Any passed\n object must be (de)serializable (in Python: pickle). All other objects created in the main\n thread are also accessible by the workers, but they are handled via the copy-on-write policy.\n This means, that e.g. 
the large main matrix is not hardcopied for every thread, as long as it\n is not modified there. This must be ensured to prevent a massive waste of memory consumption.\n \"\"\"\n if param.threads == 1:\n # for single-threading, process everything individually to minimize memory footprint\n for block_id, (start, end) in enumerate(zip(block_starts[:-1], block_starts[1:])):\n submatrix = allele_matrix.extractInterval(start, end)\n subphasing = partial_phasing.extractInterval(start, end) if partial_phasing else None\n if end - start > 1:\n processed_blocks += 1\n if not quiet:\n logger.info(\n f\"Processing block {processed_blocks} of {num_blocks} with {len(submatrix)} reads and {end - start} variants.\"\n )\n results.append(\n phase_single_block(\n block_id, submatrix, genotype_list[start:end], subphasing, param, timers, quiet\n )\n )\n del submatrix\n\n else:\n # sort block by descending size (4/3-approximation for scheduling problem)\n timers.start(\"phase_blocks\")\n joblist = list(zip(range(len(block_starts)), block_starts[:-1], block_starts[1:]))\n joblist.sort(key=lambda x: x[1] - x[2])\n\n with Pool(processes=param.threads) as pool:\n process_results = [\n pool.apply_async(\n phase_single_block_mt,\n (\n allele_matrix,\n partial_phasing,\n block_id,\n start,\n end,\n genotype_list[start:end],\n param,\n timers,\n job_id,\n num_blocks,\n quiet,\n ),\n )\n for job_id, (block_id, start, end) in enumerate(joblist)\n ]\n # collect all blockwise results\n blockwise_results = [res.get() for res in process_results]\n results = sorted(blockwise_results, key=lambda x: x.block_id)\n\n timers.stop(\"phase_blocks\")\n\n # Aggregate blockwise results\n if partial_phasing and param.block_cut_sensitivity == 0:\n # For lowest sensitivity, do not add block starts to global breakpoint list\n # (unless the partial phasing is also interrupted there)\n borders = {partial_phasing.getFirstPos(i) for i in range(len(partial_phasing))}\n else:\n borders = []\n return aggregate_results(results, ploidy, borders)", "def solve_system(A, method):\n # find b vector such that Ax = b\n # with x = [0 1 2 ... 
size(m)]\n size = A.shape\n true_x = list(xrange(0, size[1]))\n b = A.dot(true_x)\n\n # solve Ax = b and check solution error\n # diretti\n if method in [sla.spsolve, direttolu]:\n x = method(A, b)\n print(\"\\t\" + method.func_name + \" solved \" + \n str(size))\n return x, sol_error(x, true_x)\n\n # iterativi\n else: \n # per accellerare la convergenza dei metodi iterativi\n # dobbiamo passare un precondizionatore (una matrice M,\n # che approssima l'inversa di A)\n # http://osdir.com/ml/python-scientific-user/2011-06/msg00249.html\n try:\n P = sla.spilu(A, drop_tol=1e-5) \n except Exception as err:\n print(\"\\t\", err)\n print(\"\\tPorta le tue sporche matrici singolari altrove...\")\n return None, \"nan\"\n\n M = sla.LinearOperator(size, P.solve)\n\n global current_x\n current_x = None\n try: \n x, status = method(A, \n b, \n tol=1e-16, \n M=M,\n maxiter=500,\n callback=callback_func)\n except Exception:\n print(\"\\t\" + method.func_name + \" converged on \" + str(size))\n return current_x, sol_error(current_x, true_x)\n\n if status != 0:\n print(\"\\t\" + method.func_name + \" DIDN'T converge on \" +\n str(size) + \" in less than 500 iterations\")\n return current_x, sol_error(x, true_x)\n else:\n print(\"\\t\" + method.func_name + \" converged on \" +\n str(size))\n return current_x, sol_error(x, true_x)", "async def solve(self):\n\n \"\"\"TODO:\n Handle guess and checking:\n 1) Make guess (Make logical guess based on what could be most impactful...i.e. if two spots can have either number in a row)\n 2) Fork based on guess\n 3) Check if one raises from impossible square (delete this fork)\n 4) Check if one completes (will simply return from .gather)\n 5) Each board can recurse through this guess and checking, just in case\n \"\"\"\n tasks = [square.check() for row in self for square in row]\n\n return await asyncio.gather(*tasks, return_exceptions=False)", "def solve(self):\n smallest_f = self.get_smallest_f_cost_unvisited_node()\n smallest_f_node = smallest_f[0]\n\n if smallest_f[1] > 1:\n current_node = self.get_smallest_h_cost_unvisited_node()\n else:\n current_node = smallest_f_node\n if current_node.f_cost == self.inf:\n return\n\n self.set_h_cost(current_node)\n self.unvisited_pos.remove(current_node.pos)\n self.visited_pos.append(current_node.pos)\n neighbours = algo_utils.get_neighbours(current_node, self.grid, self.wall_pos)\n\n for neigh in neighbours:\n neighbour_dist = neigh.g_cost\n current_dist = current_node.g_cost\n new_dist = current_dist + 1\n if neighbour_dist < new_dist:\n continue\n neigh.g_cost = new_dist\n self.set_h_cost(neigh)\n mix_neigh = {neigh.pos: neigh.g_cost}\n self.mix.update(mix_neigh)\n mix_current = {current_node.pos: current_node.g_cost}\n self.mix.update(mix_current)\n\n smallest_f = self.get_smallest_f_cost_unvisited_node()\n smallest_f_node = smallest_f[0]\n smallest_h_node = self.get_smallest_h_cost_unvisited_node()\n\n if (\n self.end_pos not in self.unvisited_pos\n or algo_utils.get_smallest_g_cost_unvisited_node(\n self.grid, self.unvisited_pos\n ).g_cost\n == self.inf\n ):\n for key, value in self.mix.items():\n self.mix[key] = round((value * 1.0) / self.end_node.g_cost, 3)\n self.backtrack_path(self.end_node)\n else:\n if smallest_f[1] > 1:\n current_node = smallest_h_node\n else:\n current_node = smallest_f_node\n self.solve()", "def SolveSCP(self):\n\n t0 = time()\n\n # Some predicates\n Lu_min = 0.\n niters_max = self._maxiters\n maxfracchange = self._maxfracchange\n\n # initialization, resetting ...\n self.reset_all() # including 
_u_naught(), first application\n scp_min = self.greedy()\n\n # re-initialization iteration; col fixing ignored for the moment\n niters = 0\n f_change = _largenumber\n while (f_change>maxfracchange) and (niters<niters_max):\n # re-initialize u\n if (np.mod(niters, 2)==0): \n self.reset_u(random=True)\n else:\n self.reset_u()\n u_tmp, Lu_tmp = self.subgradient() # find a near-optimal solution \n u, Lu = self.subgradient() # rerun subgradient to get a set of Lagrangian multipliers\n\n scp_all = np.zeros(self._subg_nsteps)\n for i in np.arange(self._subg_nsteps):\n #self.reset_s()\n self.s = np.copy(self.f)\n scp_all[i] = self.greedy(u=u[:,i])\n\n # check if the solution is gettting better\n imin_tmp = (np.where(scp_all==np.amin(scp_all)))[0]\n imin = imin_tmp[np.argmax(Lu[imin_tmp])]\n imax = np.argmax(Lu)\n if (np.mod(niters, 5)==0):\n print(\"This Best solution: UB={0}, LB={1}, UB1={2}, LB1={3}\".format(scp_all[imin], Lu[imin], scp_all[imax], Lu[imax]))\n if (niters==0) or ((scp_all[imin]<=scp_min) and ((Lu[imin]-Lu_min)>-(np.fabs(Lu_min)*self._LB_maxfracchange))):\n scp_min = scp_all[imin]\n u_min = np.copy(u[:,imin])\n Lu_min = Lu[imin]\n self.stepsize = _stepsize\n\n LB = Lu_min\n\n # final step, needs to get u_min back\n self.u = np.copy(u_min)\n self.s = np.copy(self.f)\n UB = self.greedy()\n\n # Which is better? absolute change or fractional change? \n # Both are fine, but cost should be normalized over the mean/median.\n GAP = (UB-LB)/np.fabs(UB)\n f_change = GAP\n if (np.mod(niters, 5)==0):\n print(\"Current Best Solution: UB={0}, LB={1}, change={2}% @ niters={3}\".format(UB,LB,f_change*100.,niters))\n niters = niters + 1\n if (niters == niters_max): \n #warnings.warn(\"Iteration reaches maximum = {0}\".format(niters))\n print(\"Iteration in re-initialization reaches maximum number = {0}\".format(niters))\n\n # Need to remove redundant columns\n # self.remove_redundant() # this itself is NP-hard ...\n\n print(\"Current Best Solution: UB={0}, LB={1}, change={2}% @ niters={3}\".format(UB,LB,f_change*100.,niters))\n print(\"Final Best solution: {0}\".format(UB))\n time_used = (time()-t0)/60.\n print(\"Took {0:.3f} minutes to reach current solution.\".format(time_used))\n\n return (UB,time_used)", "def standardBlock(solver):\n #Create and fill shared array\n createCPUSharedArray(solver,numpy.zeros(solver.sharedShape,dtype=solver.dtype).nbytes)\n for i in range(solver.intermediate):\n solver.sharedArray[i,:,solver.operating:-solver.operating,solver.operating:-solver.operating] = solver.initialConditions[solver.globalBlock]\n solver.sharedArray[i,:,solver.operating:-solver.operating,:solver.operating] = solver.initialConditions[solver.globalBlock[0],solver.globalBlock[1],-solver.operating-1:-1]\n solver.sharedArray[i,:,solver.operating:-solver.operating,-solver.operating:] = solver.initialConditions[solver.globalBlock[0],solver.globalBlock[1],1:solver.operating+1]\n #Create phase objects\n solver.standard = geometry.Geometry() \n solver.standard.setAdjustment(solver.operating)\n #Setting up GPU\n if solver.gpuBool:\n # Creating cuda device and context\n cuda.init()\n cuda_device = cuda.Device(solver.gpuRank)\n solver.cuda_context = cuda_device.make_context()\n setupGPUStandard(solver)\n #Setup CPU\n setupCPUStandard(solver)\n solver.comm.Barrier() #Ensure all processes are", "def solve_steady_state(self):\n # optimization has to be done on the reduced system\n # TODO: implement different comp. 
sizes\n s0 = self.model.get_initial_conc()\n [L_inv, L, _] = self.model.N_partitioned\n si = numpy.dot(L_inv, s0)\n t = s0 - numpy.dot(L, si)\n f = lambda x: numpy.linalg.norm(\n self.dS_dt(numpy.dot(L, x) + t, 1))\n ss_i = scipy.optimize.fmin_bfgs(f, si)\n ss = numpy.dot(L, ss_i) + t\n return ss", "def solve(puzzle):\n print(\"Solving...\")\n array_puzzle = np.asarray(puzzle)\n array_puzzle.flags.writeable = False # Turn off writable flags to prevent data being ovewritten accidentally.\n goal_state = __generate_goal(len(array_puzzle[0]), len(array_puzzle))\n\n flat_puzzle = list(chain.from_iterable(puzzle)) # Flatten the list\n\n # If the puzzle doesn't contain 0, exit.\n try:\n flat_puzzle.remove(0) # Remove 0 from the list\n except:\n print(\"All puzzles must include an open tile (0).\")\n return None\n\n inversions = __count_inversions(flat_puzzle) # Count the inversions\n\n # width = len(array_puzzle[0]) # Get the width of the puzzle (columns)\n # length = len(array_puzzle) # Get the length of the puzzle (rows)\n\n oddEven = __odd_or_even(len(array_puzzle[0])) # Determine if the width is odd or even.\n start_position = __find_start(array_puzzle) # Find the start position's row\n solvable = __is_solvable(oddEven, inversions, len(array_puzzle), start_position) # Cleck if the puzzle is solvable.\n\n # If the puzzle is not solvable, return None.\n if(solvable == \"None\"):\n return None\n\n # If we cannot calculate a* (for example the given values are not all in sequential order (1-5) 4 is replaced by 6 (1,2,3,5,6))\n try:\n return __a_star(array_puzzle, goal_state)\n except:\n print(\"Please make sure there are no duplicate or skipped inputs.\")\n return None\n\n # This code was used in testing to print out the string.\n # solved = __a_star(array_puzzle, goal_state)\n # Return the moves needed to complete the puzzle.\n # return print(str(__build_string(solved)) + \" (\" + str(len(solved)) + \")\")", "def construct_linear_system(self):\n N=self.grid.Ncells()\n Nbc = len(self.dirichlet_bcs)\n self.Ncalc=Ncalc = N - Nbc\n\n # map cells to forced values\n dirichlet = dict( [ (c,v) for c,v,xy in self.dirichlet_bcs])\n\n self.is_calc_c = is_calc_c = np.ones(N,np.bool8)\n for c,v,xy in self.dirichlet_bcs:\n is_calc_c[c] = False\n\n # is_calc_c[self.c_mask] = False\n\n # c_map is indexed by real cell indices, and returns the matrix index\n c_map = self.c_map = np.zeros(N,np.int32)\n self.c_map[is_calc_c] = np.arange(Ncalc)\n\n dzc=self.dzc\n dzf=self.dzf\n area_c=self.area_c\n\n meth='coo' # 'dok'\n if meth == 'dok':\n A=sparse.dok_matrix((Ncalc,Ncalc),np.float64)\n else:\n # construct the matrix from a sequence of indices and values\n ij=[]\n values=[] # successive value for the same i.j will be summed\n \n b = np.zeros(Ncalc,np.float64)\n flux_per_gradient_j = -self.K_j * self.l_j * dzf / self.d_j * self.dt\n\n self.grid.edge_to_cells() # makes sure that edges['cells'] exists.\n \n for j in range(self.grid.Nedges()):\n e = self.grid.edges[j]\n ic1,ic2 = e['cells']\n \n if ic1<0 or ic2<0 or e['deleted']:\n continue # boundary edge, or deleted edge\n \n flux_per_gradient=flux_per_gradient_j[j]\n \n # this is the desired operation:\n # Cdiff[ic1] -= flux_per_gradient / (An[ic1]*dzc) * (C[ic2] - C[ic1])\n # Cdiff[ic2] += flux_per_gradient / (An[ic2]*dzc) * (C[ic2] - C[ic1])\n # Where Cdiff is row, C is col\n\n if is_calc_c[ic1] and is_calc_c[ic2]:\n mic2 = c_map[ic2]\n mic1 = c_map[ic1]\n v1=flux_per_gradient / (area_c[ic1]*dzc[ic1])\n v2=flux_per_gradient / (area_c[ic2]*dzc[ic2])\n \n if meth == 
'dok':\n A[mic1,mic2] -= v1\n A[mic1,mic1] += v1\n A[mic2,mic2] += v2\n A[mic2,mic1] -= v2\n else:\n ij.append( (mic1,mic2) ) ; values.append(-v1)\n ij.append( (mic1,mic1) ) ; values.append(v1)\n ij.append( (mic2,mic2) ) ; values.append(v1)\n ij.append( (mic2,mic1) ) ; values.append(-v1)\n \n elif not ( is_calc_c[ic1] or is_calc_c[ic2] ):\n # both are dirichlet, so nothing to do\n pass\n elif not is_calc_c[ic2]:\n mic1 = c_map[ic1]\n v=flux_per_gradient / (self.area_c[ic1]*dzc[ic1])\n if meth == 'dok':\n A[mic1,mic1] += v\n else:\n ij.append( (mic1,mic1) )\n values.append(v)\n\n # roughly\n # A[1,1]*x[1] + A[1,2]*x[2] + ... = b[1]\n # but we already know x[2],\n # A[1,1]*x[1] + ... = b[1] - A[1,2]*x[2]\n # so flip the sign, multiply by known dirichlet value, and\n # add to the RHS\n b[mic1] += flux_per_gradient / (area_c[ic1]*dzc[ic1]) * dirichlet[ic2]\n else: # not is_calc_c[c1]\n mic2 = c_map[ic2]\n # A[mic2,mic2] += flux_per_gradient / (area_c[ic2]*dzc[ic2])\n # A[mic2,mic1] -= flux_per_gradient / (area_c[ic2]*dzc[ic2])\n\n # A[mic2,mic2]*x[2] + A[mic2,mic1]*x[1] = b[2]\n # ...\n # A[mic2,mic2]*x[2] - flux_per_gradient / (area_c[ic2]*dzc[ic2])*x[1] = b[2]\n # ...\n # A[mic2,mic2]*x[2] = b[2] + flux_per_gradient / (area_c[ic2]*dzc[ic2])*x[1]\n v=flux_per_gradient / (area_c[ic2]*dzc[ic2])\n if meth == 'dok':\n A[mic2,mic2] += v\n else:\n ij.append( (mic2,mic2) )\n values.append(v)\n b[mic2] += flux_per_gradient / (area_c[ic2]*dzc[ic2]) * dirichlet[ic1]\n\n # Used to test 'is not 0:' but modern python complains\n if isinstance(self.alpha,np.ndarray): \n for c in range(N):\n if self.is_calc_c[c]:\n mic=self.c_map[c]\n v=self.alpha[c]*self.dt\n if meth == 'dok':\n A[mic,mic] -= v\n else:\n ij.append( (mic,mic) )\n values.append(-v)\n\n # Flux boundary conditions:\n for ic,value,xy in self.neumann_bcs:\n mic=c_map[ic]\n # make mass/time into concentration/step\n # arrived at minus sign by trial and error.\n # 2023-08-04: there was a bug here that used ic2 instead of ic.\n b[mic] -= value/(area_c[ic]*dzc[ic]) * self.dt\n\n if meth == 'dok':\n self.A = sparse.coo_matrix(A)\n else:\n ijs=np.array(ij,dtype=np.int32)\n data=np.array(values,dtype=np.float64)\n A=sparse.coo_matrix( (data, (ijs[:,0],ijs[:,1]) ), shape=(Ncalc,Ncalc) )\n self.A=A\n \n # report scale to get a sense of whether dt is too large\n Ascale = A.diagonal().min()\n log.debug(\"Ascale is %s\"%Ascale)\n\n self.b = b", "def softclippingHGMevaluation(input_generator,branches,iden_method,Plot,reference=None):\n for t in range(8,11):\n t = t / 10.0\n p = 1.0 - t\n input_signal = input_generator.GetOutput()\n nl_functions = [nlsp.function_factory.softclip(power=p),]*branches\n filter_spec_tofind = nlsp.log_bpfilter(branches=branches, input=input_signal)\n ref_nlsystem = nlsp.HammersteinGroupModel_up(input_signal=input_signal,\n nonlinear_functions=nl_functions,\n filter_irs=filter_spec_tofind,\n max_harmonics=range(1,branches+1))\n\n found_filter_spec, nl_functions = iden_method(input_generator,ref_nlsystem.GetOutput(),branches)\n iden_nlsystem = nlsp.HammersteinGroupModel_up(input_signal=input_signal,\n nonlinear_functions=nl_functions,\n filter_irs=found_filter_spec,\n max_harmonics=range(1,branches+1))\n # sine = sumpf.modules.SineWaveGenerator(frequency=5000.0,phase=0.0,samplingrate=input_signal.GetSamplingRate(),length=len(input_signal)).GetSignal()\n sine = sumpf.modules.SweepGenerator(samplingrate=input_signal.GetSamplingRate(),length=len(input_signal)).GetSignal()\n ref_nlsystem.SetInput(sine)\n iden_nlsystem.SetInput(sine)\n 
if reference is not None:\n reference = nlsp.change_length_signal(reference,length=len(input_signal))\n ref_nlsystem.SetInput(reference)\n iden_nlsystem.SetInput(reference)\n\n if Plot is True:\n plot.relabelandplot(sumpf.modules.FourierTransform(ref_nlsystem.GetOutput()).GetSpectrum(),\"Reference System\",show=False)\n plot.relabelandplot(sumpf.modules.FourierTransform(iden_nlsystem.GetOutput()).GetSpectrum(),\"Identified System\",show=False)\n print \"SNR between Reference and Identified output for symmetric hardclipping HGM(threshold:%r): %r\" %(t,nlsp.snr(ref_nlsystem.GetOutput(),\n iden_nlsystem.GetOutput()))", "def score_solution(g, s):\n pass", "def solve(self):\r\n while not self.done():\r\n self.no_open_cells()\r\n self.all_cells_are_mines()\r\n self.no_mines()\r\n if not self.done():\r\n self.obvious_cells()\r\n if not self.done():\r\n made_progress = self.safe_neighbour_difference()\r\n if made_progress:\r\n continue\r\n if not self.done():\r\n made_progress = self.adjacent_combinations()\r\n if made_progress:\r\n continue\r\n return", "def Solve(self, master_solver, local_solvers, max_iteration = 100):\n\n self.total_time = 0\n self.coordination_time = 0\n self.local_times = {}\n self.n_iter = 0\n self.n_iter_max = max_iteration\n self.local_solvers = { cml: local_solvers[indx] for indx, cml in enumerate(self.cmodels_local) }\n\n def SolveLocal(cmodel_local):\n cur_solver = self.local_solvers[cmodel_local]\n solution = cur_solver.Solve(cmodel_local)\n if solution is not None:\n self.total_time += solution['Time']\n if self.n_iter not in self.local_times:\n self.local_times[self.n_iter] = []\n self.local_times[self.n_iter].append(solution['Time'])\n return solution\n\n def SolveLocalAll():\n for cml in self.cmodels_local:\n SolveLocal(cml)\n\n def Compose():\n for cm_loc in self.cmodels_local:\n for lv in cm_loc.component_objects(pyo.Var, active = True):\n v = getattr(self.cmodel, lv.name)\n for index in lv:\n v[index].fix(pyo.value(lv[index]))\n\n def Decompose():\n for cm_loc in self.cmodels_local:\n for lp in cm_loc.component_objects(pyo.Param, active = True):\n if lp._mutable:\n p = getattr(self.cmodel, lp.name)\n if lp.is_indexed() is False:\n lp.value = pyo.value(p)\n continue\n for index in lp:\n lp[index] = pyo.value(p[index])\n\n def CollectData(output_data_obj_master = False, output_data_obj_local = False, output_data_params = False, output_times = False):\n #loger function\n def LogCollectedData(log_str):\n prefix = f'#Iteration: {self.n_iter} #'\n print(prefix + log_str)\n #collect objectives values\n obj_master = [ obj for obj in self.cmodel.component_objects(pyo.Objective, active = True) ][0]\n obj_master_value = pyo.value(obj_master)\n self.data_recorded.master_obj_value_list.append( obj_master_value )\n if output_data_obj_master:\n LogCollectedData(f'MASTER OBJ VALUE: {obj_master_value}')\n for indx, cm_loc in enumerate(self.cmodels_local):\n obj_local = [ obj for obj in cm_loc.component_objects(pyo.Objective, active = True) ][0]\n obj_local_val = pyo.value(obj_local)\n self.data_recorded.local_obj_value_list[indx].append( obj_local_val )\n if output_data_obj_local:\n LogCollectedData(f'LOCAL[{indx}] OBJ VALUE: {obj_local_val}')\n #collect Lagrangian multipliers\n for p in self.cmodel.component_objects(pyo.Param, active = True):\n if p._mutable and 'Lagrangian' in p.name:\n if p.name not in self.data_recorded.multipliers_dict:\n self.data_recorded.multipliers_dict[p.name] = []\n pv_list = [pv.value for pv in p.values()]\n 
self.data_recorded.multipliers_dict[p.name].append(pv_list)\n if output_data_params:\n LogCollectedData(f'PARAM <{p.name}>: {pv_list}')\n #collect spent times\n if output_times:\n sum_time = sum(self.local_times[self.n_iter]) + self.coordination_time \n LogCollectedData(f'SPENT TIME. COORDINATION:[{self.coordination_time}] + LOCAL:{self.local_times[self.n_iter ]} := {sum_time}')\n\n\n SolveLocalAll()\n while True:\n Compose()\n CollectData(output_data_obj_master = True, output_data_obj_local = False, \n output_data_params = False, output_times = True)\n coord_ret = self.coordinator.Coordinate(self.cmodel, master_solver)\n self.coordination_time = coord_ret['Time'] if coord_ret['Time'] is not None else 0\n self.total_time += self.coordination_time\n if coord_ret['Terminate'] == True or (self.n_iter >= self.n_iter_max):\n break\n self.n_iter += 1\n Decompose()\n if self.local_solving_manager(SolveLocal, self.cmodels_local) == False:\n return None\n self.cmodel = self.coordinator.RetrieveBest()\n\n ret_val = master_solver.ExtractSolution(self.cmodel)\n ret_val = { 'ObjectiveDual' : pyo.value(self.cmodel.ObjDual), 'Objective': pyo.value(self.cmodel.Obj),\n 'Strain': ret_val[1], 'Route': ret_val[2], 'Time': self.total_time }\n return ret_val", "def linearize_and_solve(g):\n\n # initialize the sparse H and the vector b\n H = np.zeros((len(g.x), len(g.x)), dtype='float')\n b = np.zeros(len(g.x), dtype='float')\n\n # set flag to fix gauge\n needToAddPrior = True\n Fx = 0\n\n # compute the addend term to H and b for each of our constraints\n print('linearize and build system')\n\n for edge in g.edges:\n\n # pose-pose constraint\n if edge.Type == 'P':\n\n # compute idx for nodes using lookup table\n fromIdx = g.lut[edge.fromNode]\n toIdx = g.lut[edge.toNode]\n\n # get node state for the current edge\n x_i = g.x[fromIdx:fromIdx + 3]\n x_j = g.x[toIdx:toIdx + 3]\n\n # (TODO) compute the error and the Jacobians\n e, A, B = linearize_pose_pose_constraint(\n x_i, x_j, edge.measurement)\n\n # # (TODO) compute the terms\n b_i = e.transpose() @ edge.information @ A\n b_j = e.transpose() @ edge.information @ B\n H_ii = A.transpose() @ edge.information @ A\n H_ij = A.transpose() @ edge.information @ B\n H_jj = B.transpose() @ edge.information @ B\n\n # (TODO) add the terms to H matrix and b\n H[fromIdx:fromIdx + 3, fromIdx:fromIdx + 3] += H_ii\n H[toIdx:toIdx + 3, toIdx:toIdx + 3] += H_jj\n H[fromIdx:fromIdx + 3, toIdx:toIdx + 3] += H_ij\n H[toIdx:toIdx + 3, fromIdx:fromIdx + 3, ] += H_ij.transpose()\n b[fromIdx:fromIdx + 3] += b_i[0, :]\n b[toIdx:toIdx + 3] += b_j[0, :]\n\n # Add the prior for one pose of this edge\n # This fixes one node to remain at its current location\n if needToAddPrior:\n H[fromIdx:fromIdx + 3, fromIdx:fromIdx +\n 3] = H[fromIdx:fromIdx + 3,\n fromIdx:fromIdx + 3] + 1000 * np.eye(3)\n needToAddPrior = False\n\n # pose-pose constraint\n elif edge.Type == 'L':\n print(\"you shouldn't be here...\")\n # compute idx for nodes using lookup table\n fromIdx = g.lut[edge.fromNode]\n toIdx = g.lut[edge.toNode]\n\n # get node states for the current edge\n x = g.x[fromIdx:fromIdx + 3]\n l = g.x[toIdx:toIdx + 2]\n\n # (TODO) compute the error and the Jacobians\n e, A, B = linearize_pose_landmark_constraint(\n x, l, edge.measurement)\n\n # (TODO) compute the terms\n b_i = e.transpose() @ edge.information @ A\n b_j = e.transpose() @ edge.information @ B\n H_ii = A.transpose() @ edge.information @ A\n H_ij = A.transpose() @ edge.information @ B\n H_jj = B.transpose() @ edge.information @ B\n\n # (TODO 
)add the terms to H matrix and b\n H[fromIdx:fromIdx + 3, fromIdx:fromIdx + 3] += H_ii\n H[toIdx:toIdx + 2, toIdx:toIdx + 2] += H_jj\n H[fromIdx:fromIdx + 3, toIdx:toIdx + 2] += H_ij\n H[toIdx:toIdx + 2, fromIdx:fromIdx + 3, ] += H_ij.transpose()\n b[fromIdx:fromIdx + 3] = b_i\n b[toIdx:toIdx + 2] = b_j\n # solve system\n dx = np.linalg.solve(H, b)\n\n return dx", "def solve(self):\n pass", "def solve(self):\n pass", "def solver_mll(X, y, alpha=0.1, C=None, S=None, callback=None, positive=False,\n maxiter=1000, tol=1e-4, compute_obj=False):\n n_tasks, n_samples, n_features = X.shape\n lasso = Lasso(alpha=alpha, fit_intercept=False,\n positive=positive)\n lasso_p = Lasso(alpha=alpha / n_tasks, fit_intercept=False,\n positive=True)\n if S is None:\n S = np.zeros((n_features, n_tasks))\n if C is None:\n C = np.ones(n_features)\n else:\n if C.max() <= 0:\n C = np.ones(n_features)\n\n old_theta = C[:, None] * S\n objs = []\n if compute_obj or callback:\n ll = objective(X, y, C, S, alpha)\n objs.append(ll)\n for i in range(maxiter):\n # W = block_diag(X * C[None, None, :], \"csc\")\n # lasso.fit(W, y.flatten())\n # S = lasso.coef_.reshape(n_tasks, n_features).T\n W = X * C[None, None, :]\n for k in range(n_tasks):\n lasso.fit(W[k], y[k])\n S[:, k] = lasso.coef_\n Z = S.T[:, None, :] * X\n Z = Z.reshape(n_tasks * n_samples, n_features)\n lasso_p.fit(Z, y.flatten())\n C = lasso_p.coef_\n theta = C[:, None] * S\n dll = abs(theta - old_theta).max()\n dll /= max(theta.max(), old_theta.max(), 1.)\n old_theta = theta.copy()\n if compute_obj or callback:\n ll = objective(X, y, C, S, alpha)\n objs.append(ll)\n if callback:\n callback(theta, obj=ll)\n if dll < tol:\n break\n\n if i == maxiter - 1:\n print(\"**************************************\\n\"\n \"******** WARNING: Stopped early. *****\\n\"\n \"\\n\"\n \"You may want to increase maxiter. 
Last err: %f\" % dll)\n return C, S, objs", "def putsolution(self,whichsol_,skc,skx,skn,xc,xx,y,slc,suc,slx,sux,snx): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n if skc is None:\n skc_ = None\n else:\n try:\n skc_ = memoryview(skc)\n except TypeError:\n try:\n _tmparr_skc = array.array(\"i\",skc)\n except TypeError:\n raise TypeError(\"Argument skc has wrong type\")\n else:\n skc_ = memoryview(_tmparr_skc)\n \n else:\n if skc_.format != \"i\":\n skc_ = memoryview(array.array(\"i\",skc))\n \n if skx is None:\n skx_ = None\n else:\n try:\n skx_ = memoryview(skx)\n except TypeError:\n try:\n _tmparr_skx = array.array(\"i\",skx)\n except TypeError:\n raise TypeError(\"Argument skx has wrong type\")\n else:\n skx_ = memoryview(_tmparr_skx)\n \n else:\n if skx_.format != \"i\":\n skx_ = memoryview(array.array(\"i\",skx))\n \n if skn is None:\n skn_ = None\n else:\n try:\n skn_ = memoryview(skn)\n except TypeError:\n try:\n _tmparr_skn = array.array(\"i\",skn)\n except TypeError:\n raise TypeError(\"Argument skn has wrong type\")\n else:\n skn_ = memoryview(_tmparr_skn)\n \n else:\n if skn_.format != \"i\":\n skn_ = memoryview(array.array(\"i\",skn))\n \n if xc is None:\n xc_ = None\n else:\n try:\n xc_ = memoryview(xc)\n except TypeError:\n try:\n _tmparr_xc = array.array(\"d\",xc)\n except TypeError:\n raise TypeError(\"Argument xc has wrong type\")\n else:\n xc_ = memoryview(_tmparr_xc)\n \n else:\n if xc_.format != \"d\":\n xc_ = memoryview(array.array(\"d\",xc))\n \n if xx is None:\n xx_ = None\n else:\n try:\n xx_ = memoryview(xx)\n except TypeError:\n try:\n _tmparr_xx = array.array(\"d\",xx)\n except TypeError:\n raise TypeError(\"Argument xx has wrong type\")\n else:\n xx_ = memoryview(_tmparr_xx)\n \n else:\n if xx_.format != \"d\":\n xx_ = memoryview(array.array(\"d\",xx))\n \n if y is None:\n y_ = None\n else:\n try:\n y_ = memoryview(y)\n except TypeError:\n try:\n _tmparr_y = array.array(\"d\",y)\n except TypeError:\n raise TypeError(\"Argument y has wrong type\")\n else:\n y_ = memoryview(_tmparr_y)\n \n else:\n if y_.format != \"d\":\n y_ = memoryview(array.array(\"d\",y))\n \n if slc is None:\n slc_ = None\n else:\n try:\n slc_ = memoryview(slc)\n except TypeError:\n try:\n _tmparr_slc = array.array(\"d\",slc)\n except TypeError:\n raise TypeError(\"Argument slc has wrong type\")\n else:\n slc_ = memoryview(_tmparr_slc)\n \n else:\n if slc_.format != \"d\":\n slc_ = memoryview(array.array(\"d\",slc))\n \n if suc is None:\n suc_ = None\n else:\n try:\n suc_ = memoryview(suc)\n except TypeError:\n try:\n _tmparr_suc = array.array(\"d\",suc)\n except TypeError:\n raise TypeError(\"Argument suc has wrong type\")\n else:\n suc_ = memoryview(_tmparr_suc)\n \n else:\n if suc_.format != \"d\":\n suc_ = memoryview(array.array(\"d\",suc))\n \n if slx is None:\n slx_ = None\n else:\n try:\n slx_ = memoryview(slx)\n except TypeError:\n try:\n _tmparr_slx = array.array(\"d\",slx)\n except TypeError:\n raise TypeError(\"Argument slx has wrong type\")\n else:\n slx_ = memoryview(_tmparr_slx)\n \n else:\n if slx_.format != \"d\":\n slx_ = memoryview(array.array(\"d\",slx))\n \n if sux is None:\n sux_ = None\n else:\n try:\n sux_ = memoryview(sux)\n except TypeError:\n try:\n _tmparr_sux = array.array(\"d\",sux)\n except TypeError:\n raise TypeError(\"Argument sux has wrong type\")\n else:\n sux_ = memoryview(_tmparr_sux)\n \n else:\n if sux_.format != \"d\":\n sux_ = memoryview(array.array(\"d\",sux))\n \n if snx is None:\n snx_ = None\n 
else:\n try:\n snx_ = memoryview(snx)\n except TypeError:\n try:\n _tmparr_snx = array.array(\"d\",snx)\n except TypeError:\n raise TypeError(\"Argument snx has wrong type\")\n else:\n snx_ = memoryview(_tmparr_snx)\n \n else:\n if snx_.format != \"d\":\n snx_ = memoryview(array.array(\"d\",snx))\n \n res = self.__obj.putsolution(whichsol_,skc_,skx_,skn_,xc_,xx_,y_,slc_,suc_,slx_,sux_,snx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def solve_pcaw(y, A_fun, AT_fun, lambda_l1, reshape_img_fun, head, invhead, mean, show_img_progress=False, alpha=0.2, max_iter=100, solver_tol=1e-6):\n\n\n obj_lss = np.zeros(max_iter)\n x_zs = np.zeros(max_iter)\n u_norms = np.zeros(max_iter)\n times = np.zeros(max_iter)\n\n ATy = AT_fun(y)\n x_shape = ATy.shape\n d = np.prod(x_shape)\n \n def vec(x):\n return tf.reshape(x, [-1])\n\n def A_cgs_fun(x):\n x = tf.reshape(x,x_shape)\n y = AT_fun(A_fun(x)) + alpha * x\n return vec(y)\n A_cgs = LinearOperator((d,d), matvec=A_cgs_fun, dtype='float')\n\n def compute_p_inv_A(b, z0):\n (z,info) = sp.sparse.linalg.cgs(A_cgs, vec(b), x0=vec(z0), tol=1e-3, maxiter=100)\n if info > 0:\n print('cgs convergence to tolerance not achieved')\n elif info <0:\n print('cgs gets illegal input or breakdown')\n z = tf.reshape(z, x_shape)\n return z\n\n\n def A_cgs_fun_init(x):\n x = tf.reshape(x, x_shape)\n y = AT_fun(A_fun(x))\n return vec(y)\n A_cgs_init = LinearOperator((d,d), matvec=A_cgs_fun_init, dtype='float')\n\n def compute_init(b, z0):\n (z,info) = sp.sparse.linalg.cgs(A_cgs_init, vec(b), x0=vec(z0), tol=1e-2)\n if info > 0:\n print('cgs convergence to tolerance not achieved')\n elif info <0:\n print('cgs gets illegal input or breakdown')\n z = tf.reshape(z,x_shape)\n return z\n\n # initialize z and u\n z = tf.reshape(mean,x_shape)\n u = np.zeros(x_shape)\n\n plot_normalozer = matplotlib.colors.Normalize(vmin=0.0, vmax=1.0, clip=True)\n\n\n start_time = timeit.default_timer()\n\n for iter in range(max_iter):\n\n # x-update\n net_input = z+u\n \n Wzu = head([net_input])\n q = tfp.math.soft_threshold(Wzu, lambda_l1/alpha)\n x = invhead(q)[0]\n\n # z-update\n b = ATy + alpha * (x - u)\n z = compute_p_inv_A(b, z)\n\n # u-update\n u += z - x;\n\n if show_img_progress:\n\n fig = plt.figure('current_sol')\n plt.gcf().clear()\n fig.canvas.set_window_title('iter %d' % iter)\n plt.subplot(1,3,1)\n plt.imshow(reshape_img_fun(np.clip(x, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('x')\n plt.subplot(1,3,2)\n plt.imshow(reshape_img_fun(np.clip(z, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('z')\n plt.subplot(1,3,3)\n plt.imshow(reshape_img_fun(np.clip(net_input, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('netin')\n plt.pause(0.00001)\n\n\n obj_ls = 0.5 * np.sum(np.square(y - A_fun(x)))\n x_z = np.sqrt(np.mean(np.square(x-z)))\n u_norm = np.sqrt(np.mean(np.square(u)))\n\n obj_lss[iter] = obj_ls\n x_zs[iter] = x_z\n u_norms[iter] = u_norm\n times[iter] = timeit.default_timer() - start_time\n\n if x_z < solver_tol:\n break\n\n infos = {'obj_lss': obj_lss, 'x_zs': x_zs, 'u_norms': u_norms,\n 'times': times, 'alpha':alpha, 'lambda_l1':lambda_l1,\n 'max_iter':max_iter, 'solver_tol':solver_tol}\n\n\n return (x, z, u, infos)", "def solve(self):\n\n # Open status display\n fmtstr, nsep = self.display_start()\n\n # Start solve timer\n self.timer.start(['solve', 'solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n\n # Main optimisation iterations\n for self.k in 
range(self.k, self.k + self.opt['MaxMainIter']):\n\n # Update record of X and Y from previous iteration\n self.on_iteration_start()\n\n # Compute backtracking\n if self.opt['Backtrack'] is not None and self.k >= 0:\n self.timer.stop('solve_wo_btrack')\n # Compute backtracking\n self.backtrack.update(self)\n self.timer.start('solve_wo_btrack')\n else:\n # Compute just proximal step\n self.xstep()\n # Update by combining previous iterates\n self.ystep()\n\n # Compute residuals and stopping thresholds\n self.timer.stop(['solve_wo_rsdl', 'solve_wo_btrack'])\n if not self.opt['FastSolve']:\n frcxd, adapt_tol = self.compute_residuals()\n self.timer.start('solve_wo_rsdl')\n\n # Compute and record other iteration statistics and\n # display iteration stats if Verbose option enabled\n self.timer.stop(['solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n if not self.opt['FastSolve']:\n itst = self.iteration_stats(self.k, frcxd)\n self.itstat.append(itst)\n self.display_status(fmtstr, itst)\n self.timer.start(['solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n\n # Call callback function if defined\n if self.opt['Callback'] is not None:\n if self.opt['Callback'](self):\n break\n\n # Stop if residual-based stopping tolerances reached\n if not self.opt['FastSolve']:\n if frcxd < adapt_tol:\n break\n\n # Increment iteration count\n self.k += 1\n\n # Record solve time\n self.timer.stop(['solve', 'solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n\n # Print final separator string if Verbose option enabled\n self.display_end(nsep)\n\n return self.getmin()", "def test_solvers_bc():\n tol = 3E-12 # Appropriate tolerance for these tests (P2, 20x20 mesh)\n import sympy as sym\n x, y = sym.symbols('x[0], x[1]')\n u = 1 + x**2 + 2*y**2\n f = -sym.diff(u, x, 2) - sym.diff(u, y, 2)\n f = sym.simplify(f)\n u_00 = u.subs(x, 0) # x=0 boundary\n u_01 = u.subs(x, 1) # x=1 boundary\n g = -sym.diff(u, y).subs(y, 1) # x=1 boundary\n r = 1000 # arbitrary function can go here\n s = u\n\n # Turn to C/C++ code for UFL expressions\n f = sym.printing.ccode(f)\n u_00 = sym.printing.ccode(u_00)\n u_01 = sym.printing.ccode(u_01)\n g = sym.printing.ccode(g)\n r = sym.printing.ccode(r)\n s = sym.printing.ccode(s)\n print('Test problem (C/C++):\\nu = %s\\nf = %s' % (u, f))\n print('u_00: %s\\nu_01: %s\\ng = %s\\nr = %s\\ns = %s' %\n (u_00, u_01, g, r, s))\n\n # Turn into FEniCS objects\n u_00 = Expression(u_00)\n u_01 = Expression(u_01)\n f = Expression(f)\n g = Expression(g)\n r = Expression(r)\n s = Expression(s)\n u_exact = Expression(sym.printing.ccode(u))\n\n # Define boundary conditions\n boundary_conditions = {0: {'Dirichlet': u_00},\n 1: {'Dirichlet': u_01},\n 2: {'Robin': (r, s)},\n 3: {'Neumann': g}}\n\n for Nx, Ny in [(3,3), (3,5), (5,3), (20,20)]:\n for degree in 1, 2, 3:\n for linear_solver in ['direct']:\n print('solving on 2(%dx%dx) mesh with P%d elements'\n % (Nx, Ny, degree)),\n print(' %s solver, %s function' %\n (linear_solver, solver_func.__name__))\n kappa = Constant(1)\n u, kappa = solver_bc(\n kappa, f, boundary_conditions, Nx, Ny, degree,\n linear_solver=linear_solver,\n abs_tol=0.1*tol,\n rel_tol=0.1*tol)\n # Make a finite element function of the exact u_D\n V = u.function_space()\n u_e_Function = interpolate(u_exact, V) # exact solution\n # Check that dof arrays are equal\n u_e_array = u_e_Function.vector().array() # dof values\n max_error = (u_e_array - u.vector().array()).max()\n msg = 'max error: %g for 2(%dx%d) mesh, degree=%d,'\\\n ' %s solver, %s' % \\\n (max_error, Nx, Ny, degree, 
linear_solver,\n solver_func.__name__)\n print(msg)\n assert max_error < tol, msg", "def solve_prep(self):\n\n par = self.par\n sol = self.sol\n\n # a. retirement\n sol.m_ret = np.zeros((par.T,par.Nm_ret))\n sol.c_ret = np.zeros((par.T,par.Nm_ret))\n sol.a_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_v_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vm_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vn_ret = np.zeros((par.T,par.Nm_ret))\n\n # b. working\n if par.solmethod == 'G2EGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.ucon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.dcon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.acon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.z = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n \n elif par.solmethod == 'NEGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((0,0,0))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((0,0,0))\n \n sol.c_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))\n sol.inv_v_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))", "def solve(self, state, times):", "def _solve_explicit(self, initial_conditions):\n coeff = self.a ** 2 * self.tau / self.h ** 2\n current_solution = initial_conditions\n next_solution = np.empty_like(current_solution)\n solutions = []\n\n for t in self.t_grid:\n next_solution[1:-1] = (\n current_solution[1:-1]\n + (current_solution[:-2] - 2 * current_solution[1:-1] + current_solution[2:]) * coeff\n ) + self.rhs(self.x_grid[1:-1], t) * self.tau\n\n # left bc\n if self.left_bc_type == \"DIRICHLET\":\n next_solution[0] = self.left_bc(t)\n elif self.left_bc_type == \"NEUMANN\":\n next_solution[0] = (\n 4 * next_solution[1]\n - next_solution[2]\n - 2 * self.h * self.left_bc(t)\n ) / 3.0\n\n # right bc\n if self.right_bc_type == \"DIRICHLET\":\n next_solution[-1] = self.right_bc(t)\n elif self.right_bc_type == \"NEUMANN\":\n next_solution[-1] = (\n 4 * next_solution[-2]\n - next_solution[-3]\n + 2 * self.h * self.right_bc(t)\n ) / 3.0\n if self.mode == \"VISUALIZATION\":\n solutions.append((t, next_solution.copy()))\n current_solution = next_solution\n if self.mode == \"TEST\":\n # print(\"Result: \", current_solution.tolist())\n # print(\"Right answer: \", self.anl_solution.tolist())\n self._norma(current_solution)\n elif self.mode == \"VISUALIZATION\":\n return solutions", "def u_update(eta_0, eta, eta_lin, w_0, w, w_lin, eta_T_H_L_stacked, premultiplied_lhs = None, nnls_max_iter=50): \n # PREMULTIPLIED LHS IS AN EXTRA ARGUMENT! Set it to None and add solver! 
\n \"\"\"In the following +[[]] and [:-1] are added to keep thing 1dim array of objects and still multiply it elemtwisely\"\"\" \n# #B.append([]) #THIS IS WRONG, CHANGES THE LIST \n# B_concat = np.concatenate((1/np.sqrt(2*eta))*np.array(B+[[]])[:-1], axis = 0) \n# A_ls = np.concatenate([(1/np.sqrt(2*eta0))*A, B_concat], axis = 0) \n# #print(np.array(B).shape) \n# #print(w[0].shape) \n# #print(w, eta) \n# #w.append([]) THIS IS WRONG, CHANGES THE LIST \n# w_concat = np.concatenate((1/np.sqrt(2*eta))*np.array(w+[[]])[:-1], axis = 0) #[:-1] Added as a hack to keep it one-dim array of objects \n# eta_w = np.expand_dims(1/np.sqrt(2*eta),1)*np.array(w) \n# print(eta_w.shape) \n# b_ls = np.concatenate([(1/np.sqrt(2*eta_0))*w_0, eta_w.flatten()], axis = 0) \n #Use correct broadcasting?\n w_concat = np.concatenate((1/np.sqrt(2*eta))*np.array(w+[[]])[:-1], axis = 0) #[:-1] Added as a hack to keep it one-dim array of objects \n b_ls = np.concatenate([(1/np.sqrt(2*eta_0))*w_0, w_concat, (1/np.sqrt(2*eta_lin))*w_lin], axis = 0) \n# print(np.sum(eta_w.flatten() != w_concat)) \n# premultiplied_time_start = time.time() \n# premultiplied_lhs = eta_T_H_stacked.T.dot(eta_T_H_stacked).toarray() \n# premultiplied_time_end = time.time() \n# print('premultiplying took {}'.format(premultiplied_time_end - premultiplied_time_start)) \n# premultiplied_rhs = eta_T_H_stacked.T.dot(b_ls) \n# u_next = nnls_predotted(premultiplied_lhs, premultiplied_rhs, tol=1e-5) \n# print(eta_T_H_stacked.shape, b_ls.shape) \n# A_ls_t_b = eta_T_H_stacked.T.dot(b_ls) \n# w =scipy.sparse.linalg.spsolve_triangular(RT, A_ls_t_b, lower = True) \n# x = scipy.sparse.linalg.spsolve_triangular(R, w, lower = False) \n# u_next = x \n u_next = scipy.optimize.lsq_linear(eta_T_H_L_stacked, b_ls, bounds = (0, np.inf), tol=1e-3, lsmr_tol=1e-3, max_iter=nnls_max_iter, verbose=1).x \n# u = scipy.optimize.lsq_linear(premultiplied_lhs, premultiplied_rhs, bounds = (0, np.inf), tol=1e-5).x \n return u_next", "def test_solvers():\n # With P1 elements we have an error E-15 with Krylov solver\n # tolerances of 1E-12, but with P2 elements the error is E-6.\n # P3 elements drive the tolerance down to E-3.\n # For higher mesh resolution we also need reduced tolerances.\n # The tol dict maps degree to expected tolerance for the coarse\n # meshes in the test.\n tol = {'direct': {1: 1E-11, 2: 1E-11, 3: 1E-11},\n 'Krylov': {1: 1E-14, 2: 1E-05, 3: 1E-03}}\n u_D = Expression('1 + x[0]*x[0] + 2*x[1]*x[1]')\n kappa = Expression('x[0] + x[1]')\n f = Expression('-8*x[0] - 10*x[1]')\n for Nx, Ny in [(3,3), (3,5), (5,3)]:\n for degree in 1, 2, 3:\n for linear_solver in 'direct', 'Krylov':\n for solver_func in solver, solver_objects:\n print('solving on 2(%dx%dx) mesh with P%d elements'\n % (Nx, Ny, degree)),\n print(' %s solver, %s function' %\n (linear_solver, solver_func.__name__))\n # Important: Krylov solver error must be smaller\n # than tol!\n u = solver_func(\n kappa, f, u_D, Nx, Ny, degree,\n linear_solver=linear_solver,\n abs_tol=0.1*tol[linear_solver][degree],\n rel_tol=0.1*tol[linear_solver][degree])\n # Make a finite element function of the exact u_D\n V = u.function_space()\n u_D_Function = interpolate(u_D, V) # exact solution\n # Check that dof arrays are equal\n u_D_array = u_D_Function.vector().array() # dof values\n max_error = (u_D_array - u.vector().array()).max()\n msg = 'max error: %g for 2(%dx%d) mesh, degree=%d,'\\\n ' %s solver, %s' % \\\n (max_error, Nx, Ny, degree, linear_solver,\n solver_func.__name__)\n print(msg)\n assert max_error < 
tol[linear_solver][degree], msg", "def solve(self):\n for step in self.run.values():\n step.solve()", "def _cg_solve(self, x, data, iters, lambd, tol):\n b = np.zeros(\n (\n self.image_dim,\n self.image_dim\n ),\n self.DTYPE\n )\n Ax = np.zeros(\n (\n self.image_dim,\n self.image_dim\n ),\n self.DTYPE\n )\n\n b = self.operator_rhs(data)\n residual = b\n p = residual\n delta = np.linalg.norm(residual) ** 2 / np.linalg.norm(b) ** 2\n self.res.append(delta)\n print(\"Initial Residuum: \", delta)\n\n for i in range(iters):\n Ax = self.operator_lhs(p)\n Ax = Ax + lambd * p\n alpha = np.vdot(residual, residual)/(np.vdot(p, Ax))\n x[i + 1] = x[i] + alpha * p\n residual_new = residual - alpha * Ax\n delta = np.linalg.norm(residual_new) ** 2 / np.linalg.norm(b) ** 2\n self.res.append(delta)\n if delta < tol:\n print(\"\\nConverged after %i iterations to %1.3e.\" %\n (i+1, delta))\n x[0] = b\n return x[:i+1, ...]\n if not np.mod(i, 1):\n print(\"Residuum at iter %i : %1.3e\" % (i+1, delta), end='\\r')\n\n beta = (np.vdot(residual_new, residual_new) /\n np.vdot(residual, residual))\n p = residual_new + beta * p\n (residual, residual_new) = (residual_new, residual)\n x[0] = b\n return x", "def solve_csp(nodes, arcs, max_steps):\n\n nodes = list(nodes)\n print 'nodes:', nodes\n\n node_values_dict = dict(zip(nodes, '2'*len(set(nodes))))\n print 'initial random assignment', node_values_dict\n indexes = np.arange(len(nodes))\n\n graph = {}\n for arc in arcs:\n if not arc[0] in graph:\n graph[arc[0]] = []\n if not arc[1] in graph:\n graph[arc[1]] = []\n graph[arc[0]].append(arc[1])\n graph[arc[1]].append(arc[0])\n for i in indexes:\n if i in graph:\n continue\n else:\n graph[i] = []\n graph = dict(sorted(graph.items()))\n print 'graph:', graph\n\n domain = [i for i in np.arange(1, 10, 1)]\n print 'initial domain for each node:', domain\n\n superAdjacency ={}\n for i in np.arange(len(nodes)):\n superAdjacency[i] = []\n superAdjacency[i].append(nodes[i])\n superAdjacency[i].append(node_values_dict[nodes[i]])\n superAdjacency[i].append(graph[i])\n superAdjacency[i].append(domain)\n\n print 'superAdjacency', superAdjacency\n\n def getNodeType(superAdjacency, index):\n return list(superAdjacency[index])[0]\n\n def getCurrentAssignment(superAdjacency, index):\n return list(superAdjacency[index])[1]\n\n def getCurrentAssignmentForList(superAdjacency, indexList):\n return [int(list(superAdjacency[index])[1]) for index in indexList]\n\n def getSolution(superAdjacency):\n return [int(list(superAdjacency[index])[1]) for index in superAdjacency]\n\n def getNeighbours(superAdjacency, index):\n return list(superAdjacency[index])[2]\n\n def getDomain(superAdjacency, index):\n return list(superAdjacency[index])[3]\n\n def updateSuperAdjacency(superAdjacency, nodeType, newValue):\n updateList =[]\n for i in superAdjacency:\n if str(getNodeType(superAdjacency, i)) == nodeType:\n updateList.append(i)\n for i in updateList:\n superAdjacency[i][1] = newValue\n\n def isSolution():\n return graphConstraints(superAdjacency)\n\n def graphConstraints(superAdjacency):\n graphEval = []\n\n for index in superAdjacency:\n neighbours = getNeighbours(superAdjacency, index)\n nodeType = getNodeType(superAdjacency, index)\n\n if nodeType == 'T':\n graphEval.append(int(str(eval(str(\n getCurrentAssignmentForList(superAdjacency, neighbours)).replace(',', '*'))[0])[0]))\n elif nodeType == 'C':\n return 'NA'\n elif nodeType == 'S':\n graphEval.append(int(str(eval(str(\n getCurrentAssignmentForList(superAdjacency, neighbours)).replace(',', 
'*'))[0])[::-1][0]))\n elif nodeType == 'H':\n graphEval.append(int(str(np.sum(\n getCurrentAssignmentForList(superAdjacency, neighbours)))[0]))\n if nodeType == 'P':\n graphEval.append(int(str(np.sum(\n getCurrentAssignmentForList(superAdjacency, neighbours)))[::-1][0]))\n\n currentAssignment = [item[1] for item in superAdjacency.values()]\n difference = map(sub, currentAssignment, graphEval)\n\n if sum(difference) == 0:\n return True\n else:\n return difference\n\n def findConflictVariable(superAdjacency, lastUpdateNode):\n node_conflict_count = {}\n for node in node_values_dict:\n node_conflict_count[node] = 0\n for index in superAdjacency:\n neighbours = getNeighbours(superAdjacency, index)\n nodeType = getNodeType(superAdjacency, index)\n if nodeType == 'T':\n try:\n if getCurrentAssignment(superAdjacency, index) != \\\n int(str(eval(str(getCurrentAssignmentForList(superAdjacency, neighbours)).replace(',', '*'))[0])[0]):\n node_conflict_count[nodeType] = node_conflict_count[nodeType] + 1\n except:\n continue\n elif nodeType == 'S':\n try:\n if getCurrentAssignment(superAdjacency, index) != int(str(eval(str(getCurrentAssignmentForList(superAdjacency, neighbours)).replace(',', '*'))[0])[::-1][0]):\n node_conflict_count[nodeType] = node_conflict_count[nodeType] + 1\n except:\n continue\n elif nodeType == 'H':\n try:\n if getCurrentAssignment(superAdjacency, index) != int(str(np.sum(getCurrentAssignmentForList(superAdjacency, neighbours)))[0]):\n node_conflict_count[nodeType] = node_conflict_count[nodeType] + 1\n except:\n continue\n if nodeType == 'P':\n try:\n if getCurrentAssignment(superAdjacency, index) != int(str(np.sum(getCurrentAssignmentForList(superAdjacency, neighbours)))[::-1][0]):\n node_conflict_count[nodeType] = node_conflict_count[nodeType] + 1\n except:\n continue\n choices = [k for k, v in node_conflict_count.items() if v > 0]\n if len(choices) > 0:\n updateNode = random.choice(choices)\n\n if updateNode == lastUpdateNode:\n choices.pop(choices.index(updateNode))\n try:\n lastUpdateNode = random.choice(choices)\n return lastUpdateNode, lastUpdateNode\n except:\n return lastUpdateNode, lastUpdateNode\n else:\n lastUpdateNode = updateNode\n return updateNode, lastUpdateNode\n else:\n return 'NA', 'NA'\n\n\n\n def valueForConflictedVariable(superAdjacency, var):\n for index in superAdjacency:\n nodeType = getNodeType(superAdjacency, index)\n neighbours = getNeighbours(superAdjacency, index)\n if not neighbours:\n continue\n elif str(nodeType) == str(var):\n domain = getDomain(superAdjacency, index)\n\n choice = random.choice(domain)\n if nodeType == 'T':\n choice = int(str(eval(str(getCurrentAssignmentForList(superAdjacency, neighbours)).replace(',', '*'))[0])[0])\n elif nodeType == 'S':\n choice = int(str(eval(str(getCurrentAssignmentForList(superAdjacency, neighbours)).replace(',', '*'))[0])[::-1][0])\n elif nodeType == 'H':\n choice = int(str(np.sum(getCurrentAssignmentForList(superAdjacency, neighbours)))[0])\n if nodeType == 'P':\n choice = int(str(np.sum(getCurrentAssignmentForList(superAdjacency, neighbours)))[::-1][0])\n\n choice = int(choice)\n if choice % 2 == 0:\n return choice\n else:\n return choice\n\n def min_conflicts(nodes, arcs, max_steps):\n lastUpdateNode = ''\n for i in range(max_steps):\n if isSolution() == True:\n return\n var, lastUpdateNode = findConflictVariable(superAdjacency, lastUpdateNode)\n if var != 'NA':\n value = valueForConflictedVariable(superAdjacency, var)\n updateSuperAdjacency(superAdjacency, var, value)\n node_values_dict[var] = 
value\n else:\n pass\n\n return\n\n min_conflicts(nodes, arcs, max_steps)\n node_values = getSolution(superAdjacency)\n return node_values", "def refinemesh(prev_legs, state0_chaser, n_s):\n scored_points = np.array([])\n for leg in prev_legs:\n scored_point = [*leg.dv, leg.t_leg, leg.score]\n scored_points = np.append(scored_points, scored_point)\n scored_points = scored_points.reshape(len(prev_legs), 5)\n tri = Delaunay(scored_points[:, 0:4], qhull_options='QJ')\n m_max = max(scored_points[:, 4]) # Maximum trajectory score of all simplices of the triangulation\n if m_max == 0:\n print('algorithm.py: m_max = 0 because all leg scores are 0!!!')\n m_max = 1 # to avoid raising the dividing by 0 error if all leg scores are 0\n g_max = 1\n for q in tri.simplices:\n smplx_scores = scored_points[:, 4][q] # scores of the points defining the simplex\n aux = mean(smplx_scores)\n if g_max < aux:\n g_max = aux\n\n simplices_scored = []\n for q in tri.simplices:\n smplx_score = compute_simplexscore(q, scored_points, m_max, g_max)\n # simp_scored = [smplx_score, q_vec]\n simplices_scored.append([smplx_score, q])\n sorted_simp_scores = sorted(simplices_scored, reverse=True) # ranks the simplices based on score\n new_samples = samplewithinbestsimplices(sorted_simp_scores, tri.points, n_s)\n\n new_legs = []\n for s in new_samples:\n leg = Leg(s[0:3], s[3], state0_chaser)\n new_legs.append(leg)\n\n return new_legs", "def rwgraph_analyze2(input=(None)):\r\n\r\n\r\n #set up graph and degree distribution arrays\r\n n=2000\r\n m=4\r\n G=nx.barabasi_albert_graph(n, m, seed=5)\r\n Nt=100\r\n M=20000\r\n maxdeg=0\r\n degree_dist=[]\r\n for i in range(0,n):\r\n degree_dist.append(G.degree[i])\r\n if G.degree[i]>maxdeg:\r\n maxdeg=G.degree[i]\r\n j=i\r\n\r\n #set inital conditions and D\r\n y0=np.zeros(n,dtype=int)\r\n y0[j]=200\r\n D=1\r\n #define time for odi Int\r\n t=np.arange(Nt+1,dtype=int)\r\n #set up operators\r\n A = nx.adjacency_matrix(G)\r\n Q = A.toarray().sum(axis=1)\r\n L=np.diag(Q)-A.toarray()\r\n Q_inv=1/Q\r\n Ls=np.diag(np.ones(n))-np.matmul(np.diag(Q_inv),A.toarray())\r\n Ls_tran=np.transpose(Ls)\r\n\r\n #convert to sparse operators and include diffusion\r\n L_spar = scipy.sparse.csr_matrix(-D*L)\r\n Ls_spar = scipy.sparse.csr_matrix(-D*Ls)\r\n Ls_tran_spar = scipy.sparse.csr_matrix(-D*Ls_tran)\r\n A=nx.adjacency_matrix(G)\r\n L=-D*(scipy.sparse.diags(degree_arr)-A)\r\n Ls=-D*(scipy.sparse.diags(np.ones(N))-scipy.sparse.diags(1/degree_arr).dot(A))\r\n\r\n #define operators\r\n def Lap(y,t):\r\n return scipy.sparse.csr_matrix.__mul__(L_spar,y)\r\n def Lap_Ls(y,t):\r\n return scipy.sparse.csr_matrix.__mul__(Ls_spar,y)\r\n def Lap_Ls_tran(y,t):\r\n return scipy.sparse.csr_matrix.__mul__(Ls_tran_spar,y)\r\n\r\n #solutions of different operators\r\n solL=scipy.integrate.odeint(Lap,y0,t)\r\n solLs=scipy.integrate.odeint(Lap_Ls,y0,t)\r\n solLs_tran=scipy.integrate.odeint(Lap_Ls_tran,y0,t)\r\n\r\n\r\n #finds eigen values and vectors and puts them into order\r\n def eigen(L):\r\n eigen_values,eigen_vectors=scipy.linalg.eig(-L)\r\n idx = eigen_values.argsort()[::-1]\r\n eigen_values = eigen_values[idx]\r\n eigen_vectors = eigen_vectors[:,idx]\r\n return eigen_values,eigen_vectors\r\n\r\n #finds all eigen values and eigen vectors of the different operators. 
can use sparse matrics\r\n eigen_values_LS,eigen_vectors_LS=eigen(Ls)\r\n eigen_values_LS_tran,eigen_vectors_LS_tran=eigen(Ls_tran)\r\n eigen_values_L,eigen_vectors_L=eigen(L)\r\n eigen_values_L2,eigen_vectors_L2=eigen(L*0.36)\r\n\r\n ### could have eigs here as didn't end up using all eigenvalues ####\r\n #eigen values graph\r\n n0=len(eigen_values_L)\r\n eig_nums=np.arange(n0)\r\n plt.figure(figsize=(12, 6))\r\n plt.scatter(eig_nums[0:10],eigen_values_L2[0:10],s=50,marker=\"x\" ,label='L , D=0.36')\r\n plt.scatter(eig_nums[0:10],eigen_values_LS[0:10],s=50, marker=\"|\",label='LS , D=1')\r\n plt.scatter(eig_nums[0:10],eigen_values_LS_tran[0:10],s=50,marker='_',label='LS_tran , D=1')\r\n plt.scatter(eig_nums[0:10],eigen_values_L[0:10],s=50,marker=\"+\" ,label='L , D=1')\r\n plt.legend(loc=\"lower left\", fontsize=12,fancybox=True, framealpha=1, shadow=True, borderpad=1)\r\n plt.xlabel('eigen value number')\r\n plt.ylabel('eigenvalue')\r\n plt.title(\"Eigenvlaues of Laplacian Matrixs\")\r\n plt.show()\r\n\r\n print(\"4 biggest eigenvalues for each operater\")\r\n print('L=',eigen_values_L[0:4])\r\n print('Ls=',eigen_values_LS[0:4])\r\n print('Ls_tran=',eigen_values_LS_tran[0:4])\r\n #prints 4 biggest eigen values\r\n #counts node distrubtion by creating dictionary\r\n def result_count(sol,Nt,G):\r\n \"\"\" returns cumlative frequency/probailties for nodes of same degree and returns dictionary\"\"\"\r\n n = G.number_of_nodes()\r\n dict_freq={}\r\n for i in range(n):\r\n k=G.degree(i)\r\n if k not in dict_freq:\r\n dict_freq[k]=sol[Nt,i]\r\n else:\r\n dict_freq[k]+=sol[Nt,i]\r\n return dict_freq\r\n\r\n #frequency count of solutions\r\n dict_freq=result_count(solL,Nt,G)\r\n dict_freq2=result_count(solLs,Nt,G)\r\n dict_freq3=result_count(solLs_tran,Nt,G)\r\n\r\n #random walk data\r\n X=rwgraph(G,j,20000,100)\r\n Listnodes7=[]\r\n for i in range(20000):\r\n Listnodes7.append(G.degree(X[i,100]))\r\n X=rwgraph(G,j,200,100)\r\n Listnodes8=[]\r\n for i in range(200):\r\n Listnodes8.append(G.degree(X[i,100]))\r\n X=rwgraph(G,j,50000,5000)\r\n Listnodes9=[]\r\n for i in range(50000):\r\n Listnodes9.append(G.degree(X[i,5000]))\r\n listfreq7=CountFrequency(Listnodes7)\r\n listfreq8=CountFrequency(Listnodes8)\r\n listfreq9=CountFrequency(Listnodes9)\r\n listfreq_deg=CountFrequency(degree_dist)\r\n z2=[]\r\n z3=[]\r\n z1=[]\r\n z_deg2=[]\r\n z_deg3=[]\r\n z_deg1=[]\r\n for i in listfreq7:\r\n z2.append(listfreq7[i]/(listfreq_deg[i]*20000))\r\n z_deg2.append(i)\r\n for i in listfreq8:\r\n z3.append(listfreq8[i]/(listfreq_deg[i]*200))\r\n z_deg3.append(i)\r\n for i in listfreq8:\r\n z1.append(listfreq9[i]/(listfreq_deg[i]*50000))\r\n z_deg1.append(i)\r\n #operator solutions compared to node degree frequency\r\n z4,z5,z6=[],[],[]\r\n z_deg4,z_deg5,z_deg6=[],[],[]\r\n for i in dict_freq:\r\n z4.append(dict_freq[i]/(listfreq_deg[i]*200))\r\n z_deg4.append(i)\r\n for i in dict_freq2:\r\n z5.append(dict_freq2[i]/(listfreq_deg[i]*200))\r\n z_deg5.append(i)\r\n for i in dict_freq3:\r\n z6.append(dict_freq3[i]/(listfreq_deg[i]*200))\r\n z_deg6.append(i)\r\n\r\n plt.figure(figsize=(15, 10))\r\n plt.scatter(z_deg1, z1,label='Nt=5000, M=50000')\r\n plt.scatter(z_deg2, z2,label='Nt=100, M=20000')\r\n plt.scatter(z_deg3, z3,label='Nt=100, M=200')\r\n plt.scatter(z_deg4, z4,label='L, Nt=100')\r\n plt.scatter(z_deg5, z5,label='Ls, Nt=100')\r\n plt.scatter(z_deg6, z6,label='Ls_tran, Nt=100')\r\n plt.ylim((-0.005,0.020))\r\n plt.xlabel('degree of node')\r\n plt.ylabel('frequency of final position / M*frequency of 
degree')\r\n plt.legend(loc=\"upper left\", fontsize=12,fancybox=True, framealpha=1, shadow=True, borderpad=1)\r\n plt.title(\"Frequency of final positions relative to number of nodes of that degree, for changing times Nt and M.\")\r\n plt.show()\r\n\r\n #code to produce final graph\r\n iarray1=LinearModel(G,x=j,i0=1,L1='L',D=1,tf=20,Nt=Nt)\r\n iarray2=LinearModel(G,x=j,i0=1,L1='Ls',D=1,tf=20,Nt=Nt)\r\n iarray3=LinearModel(G,x=j,i0=1,L1='Lst',D=1,tf=20,Nt=Nt)\r\n tarray = np.linspace(0,5,Nt+1)\r\n plt.figure(figsize=(12, 6))\r\n plt.plot(tarray, iarray1[:,7] ,label='rand node L,deg=46',color='b',alpha=0.5)\r\n plt.plot(tarray, iarray2[:,7] ,label='rand node Ls,deg=46',marker='|',color='r')\r\n plt.scatter(tarray, iarray3[:,7] ,label='rand node LST,deg=46',marker='_',color='y')\r\n plt.scatter(tarray, iarray1[:,1801] ,label='rand node L, deg=5',color='m',alpha=0.5,marker='+')\r\n plt.plot(tarray, iarray2[:,1801] ,label='rand node Ls,deg=5',marker='|',color='c')\r\n plt.scatter(tarray, iarray3[:,1801] ,label='rand node LST,deg=5',marker='_',color='g')\r\n plt.xlabel('time')\r\n plt.ylabel('representive frequency')\r\n plt.legend()\r\n plt.title(\"Comparing repestive frequency of a random nodes, for the different linear models,time step=50,D=0.1\")\r\n plt.show()\r\n return None #modify as needed\r", "def mbed_solve (A, budgets, S, verbose=True):\n # print(S)\n start_time = time.time()\n x_v, C = initialize(A, S)\n if (verbose):\n print(\"Initialized\")\n print(\"V1: \", np.sum(x_v == 1), \" ,V2: \", np.sum(x_v == -1))\n results_info, S_new, Ad, edges_removed = random_choose_candidate_solve (x_v, C, A, S, budgets, start_time, verbose=verbose)\n return results_info, S_new, Ad, edges_removed", "def method2(self):\n cres=np.zeros(self.NL,dtype=float) # List of invariants\n # The U matrices from Fukui's method; storage...\n Ux_loc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy_loc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n \n for il in range(self.NL):\n # ... 
and calculation of U matrices for each layer\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.LDM[il,ix ,iy ,:,:]\n mat2=self.LDM[il,(ix%self.kS.Nx)+1 ,iy ,:,:]\n mat3=self.LDM[il,ix ,(iy%self.kS.Ny)+1 ,:,:]\n \n Ux_loc[ix,iy]=np.dot(np.conj(mat1.T),mat2)[1,1]\n Uy_loc[ix,iy]=np.dot(np.conj(mat1.T),mat3)[1,1]\n \n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux_loc[ix,iy]*Uy_loc[ix+1,iy]/Ux_loc[ix,iy+1]/Uy_loc[ix,iy])\n cres[il]+=(ftemp/2./pi/1j).real # Layer specific topological invariant\n \n return cres", "def Solve(self, cost, termination=None, ExtraArgs=(), **kwds):\n # process and activate input settings\n sigint_callback = kwds.pop('sigint_callback', None)\n settings = self._process_inputs(kwds)\n disp = settings['disp'] if 'disp' in settings else False\n echo = settings['callback'] if 'callback' in settings else None\n# for key in settings:\n# exec \"%s = settings['%s']\" % (key,key)\n if disp in ['verbose', 'all']: verbose = True\n else: verbose = False\n #-------------------------------------------------------------\n\n from python_map import python_map\n if self._map != python_map:\n #FIXME: EvaluationMonitor fails for MPI, throws error for 'pp'\n from mystic.monitors import Null\n evalmon = Null()\n else: evalmon = self._evalmon\n fcalls, cost = wrap_function(cost, ExtraArgs, evalmon)\n\n # set up signal handler\n #self._EARLYEXIT = False\n self._generateHandler(sigint_callback) \n\n # activate signal_handler\n #import threading as thread\n #mainthread = isinstance(thread.current_thread(), thread._MainThread)\n #if mainthread: #XXX: if not mainthread, signal will raise ValueError\n import signal\n if self._handle_sigint:\n signal.signal(signal.SIGINT,self.signal_handler)\n\n # register termination function\n if termination is not None: self.SetTermination(termination)\n\n # get the nested solver instance\n solver = self._AbstractEnsembleSolver__get_solver_instance()\n #-------------------------------------------------------------\n\n # generate starting points\n initial_values = self._InitialPoints()\n\n # run optimizer for each grid point\n from copy import deepcopy as _copy\n op = [_copy(solver) for i in range(len(initial_values))]\n #cf = [cost for i in range(len(initial_values))]\n vb = [verbose for i in range(len(initial_values))]\n cb = [echo for i in range(len(initial_values))] #XXX: remove?\n at = self.id if self.id else 0 # start at self.id\n id = range(at,at+len(initial_values))\n\n # generate the local_optimize function\n def local_optimize(solver, x0, rank=None, disp=False, callback=None):\n from copy import deepcopy as _copy\n from mystic.tools import isNull\n solver.id = rank\n solver.SetInitialPoints(x0)\n if solver._useStrictRange: #XXX: always, settable, or sync'd ?\n solver.SetStrictRanges(min=solver._strictMin, \\\n max=solver._strictMax) # or lower,upper ?\n solver.Solve(cost, disp=disp, callback=callback)\n sm = solver._stepmon\n em = solver._evalmon\n if isNull(sm): sm = ([],[],[],[])\n else: sm = (_copy(sm._x),_copy(sm._y),_copy(sm._id),_copy(sm._info))\n if isNull(em): em = ([],[],[],[])\n else: em = (_copy(em._x),_copy(em._y),_copy(em._id),_copy(em._info))\n return solver, sm, em\n\n # map:: solver = local_optimize(solver, x0, id, verbose)\n results = self._map(local_optimize, op, initial_values, id, \\\n vb, cb, **self._mapconfig)\n\n # save initial state\n self._AbstractSolver__save_state()\n #XXX: HACK TO GET CONTENT OF ALL MONITORS\n # reconnect monitors; save all solvers\n from 
mystic.monitors import Monitor\n while results: #XXX: option to not save allSolvers? skip this and _copy\n _solver, _stepmon, _evalmon = results.pop()\n sm = Monitor()\n sm._x,sm._y,sm._id,sm._info = _stepmon\n _solver._stepmon.extend(sm)\n del sm\n em = Monitor()\n em._x,em._y,em._id,em._info = _evalmon\n _solver._evalmon.extend(em)\n del em\n self._allSolvers[len(results)] = _solver\n del results, _solver, _stepmon, _evalmon\n #XXX: END HACK\n\n # get the results with the lowest energy\n self._bestSolver = self._allSolvers[0]\n bestpath = self._bestSolver._stepmon\n besteval = self._bestSolver._evalmon\n self._total_evals = self._bestSolver.evaluations\n for solver in self._allSolvers[1:]:\n self._total_evals += solver.evaluations # add func evals\n if solver.bestEnergy < self._bestSolver.bestEnergy:\n self._bestSolver = solver\n bestpath = solver._stepmon\n besteval = solver._evalmon\n\n # return results to internals\n self.population = self._bestSolver.population #XXX: pointer? copy?\n self.popEnergy = self._bestSolver.popEnergy #XXX: pointer? copy?\n self.bestSolution = self._bestSolver.bestSolution #XXX: pointer? copy?\n self.bestEnergy = self._bestSolver.bestEnergy\n self.trialSolution = self._bestSolver.trialSolution #XXX: pointer? copy?\n self._fcalls = self._bestSolver._fcalls #XXX: pointer? copy?\n self._maxiter = self._bestSolver._maxiter\n self._maxfun = self._bestSolver._maxfun\n\n # write 'bests' to monitors #XXX: non-best monitors may be useful too\n self._stepmon = bestpath #XXX: pointer? copy?\n self._evalmon = besteval #XXX: pointer? copy?\n self.energy_history = None\n self.solution_history = None\n #from mystic.tools import isNull\n #if isNull(bestpath):\n # self._stepmon = bestpath\n #else:\n # for i in range(len(bestpath.y)):\n # self._stepmon(bestpath.x[i], bestpath.y[i], self.id)\n # #XXX: could apply callback here, or in exec'd code\n #if isNull(besteval):\n # self._evalmon = besteval\n #else:\n # for i in range(len(besteval.y)):\n # self._evalmon(besteval.x[i], besteval.y[i])\n #-------------------------------------------------------------\n\n # restore default handler for signal interrupts\n if self._handle_sigint:\n signal.signal(signal.SIGINT,signal.default_int_handler)\n\n # log any termination messages\n msg = self.Terminated(disp=disp, info=True)\n if msg: self._stepmon.info('STOP(\"%s\")' % msg)\n # save final state\n self._AbstractSolver__save_state(force=True)\n return", "def _solve_resolvedtiles(\n resolvedtiles, matches, nvertex, regularization_lambda,\n regularization_translation_factor, regularization_lens_lambda,\n good_solve_dict,\n logger=default_logger, **kwargs):\n\n # FIXME this is done twice -- think through\n tilespecs = resolvedtiles.tilespecs\n example_tspec = tilespecs[0]\n\n mesh = _create_mesh(resolvedtiles, matches, nvertex, **kwargs)\n\n nend = mesh.points.shape[0]\n\n # logger = logging.getLogger(self.__class__.__name__)\n logger.info(\n \"\\n aimed for %d mesh points, got %d\" %\n (nvertex, nend))\n\n if mesh.points.shape[0] < 0.5*nvertex:\n raise MeshLensCorrectionException(\n \"mesh coarser than intended\")\n\n # prepare the linear algebra and solve\n A, weights, b, lens_dof_start = create_A(\n matches, tilespecs, mesh)\n\n x0 = create_x0(\n A.shape[1], tilespecs)\n\n reg = create_regularization(\n A.shape[1],\n len(tilespecs),\n regularization_lambda,\n regularization_translation_factor,\n regularization_lens_lambda)\n\n solution, errx, erry = solve(\n A, weights, reg, x0, b)\n\n transforms = create_transforms(\n len(tilespecs), 
solution)\n\n tf_trans, jresult, solve_message = report_solution(\n errx, erry, transforms, good_solve_dict)\n\n logger.info(solve_message)\n\n # check quality of solution\n if not all([\n errx.mean() < good_solve_dict['error_mean'],\n erry.mean() < good_solve_dict['error_mean'],\n errx.std() < good_solve_dict['error_std'],\n erry.std() < good_solve_dict['error_std']]):\n raise MeshLensCorrectionException(\n \"Solve not good: %s\" % solve_message)\n\n logger.debug(solve_message)\n\n new_ref_transform = create_thinplatespline_tf(\n mesh, solution, lens_dof_start, logger)\n\n bbox = example_tspec.bbox_transformed(tf_limit=0)\n tbbox = new_ref_transform.tform(bbox)\n bstr = 'new transform corners:\\n'\n for i in range(bbox.shape[0]-1):\n bstr += \" (%0.1f, %0.1f) -> (%0.1f, %0.1f)\\n\" % (\n bbox[i, 0], bbox[i, 1],\n tbbox[i, 0], tbbox[i, 1])\n logger.info(bstr)\n\n new_tilespecs = new_specs_with_tf(\n new_ref_transform, tilespecs, transforms)\n\n stage_affine = estimate_stage_affine(tilespecs, new_tilespecs)\n sastr = (\n \"affine estimate of tile translations:\\n\" +\n \" scale: {}\\n\".format(stage_affine.scale) +\n \" translation: {}\\n\".format(stage_affine.translation) +\n \" shear: {}\\n\".format(stage_affine.shear) +\n \" rotation: {}\\n\".format(np.degrees(stage_affine.rotation)))\n logger.info(sastr)\n\n resolved = renderapi.resolvedtiles.ResolvedTiles(\n tilespecs=new_tilespecs,\n transformList=[new_ref_transform])\n return resolved, new_ref_transform, jresult", "def solve_elas(self,x,E_p=None):\n \n if x['Crystal_Structure'] == \"Cubic\":\n self.estf = self.Ccubic( x['Stiffness'][0], x['Stiffness'][1], x['Stiffness'][2] )\n\n elif x['Crystal_Structure'] == \"HCP\":\n self.estf = self.Chcp( x['Stiffness'][0], x['Stiffness'][1], x['Stiffness'][2], x['Stiffness'][3], x['Stiffness'][4] )\n\n # Update orientation\n for n in range(9):\n cell_num_list = list((9*self.cell_num)+n)\n self.orient.vector()[cell_num_list] = self.rots[self.subdomain_num,n]\n \n self.a = inner(self.sigs3x3(self.u), sym(grad(self.v)))*dx\n \n if E_p:\n # Note use of sym(), assuming E_p to be the \\chi field\n L_elas_rhs = self.L_elas + inner(self.sigs_e(sym(E_p)), sym(grad(self.v)))*dx\n else:\n L_elas_rhs = self.L_elas \n\n self.A_elas, self.b_elas = assemble_system(self.a, L_elas_rhs, self.bc_elas) \n \n # Attach near nullspace to matrix\n as_backend_type(self.A_elas).set_near_nullspace(self.null_space)\n\n # Set matrix operator\n self.elasticity_solver.set_operator(self.A_elas);\n\n # Compute solution\n self.elasticity_solver.solve(self.ue.vector(), self.b_elas);\n \n if E_p:\n self.Ue_sym = project( sym(grad(self.ue) - E_p), self.TFS, solver_type=\"cg\", preconditioner_type=\"ilu\")\n else:\n self.Ue_sym = project( sym(grad(self.ue)), self.TFS, solver_type=\"cg\", preconditioner_type=\"ilu\")\n \n self.sim_strn = np.reshape(self.Ue_sym.vector().get_local(),(len(self.grains.array()),9))\n\n for grain_no in range(self.grains.array().max()):\n # Grain numbering is 1 index origin\n cell_subset = self.grains.array()==(grain_no+1)\n if np.any(cell_subset):\n self.sim_avg[grain_no,:] = np.average(self.sim_strn[cell_subset,:],\n axis=0,weights=self.dVol[cell_subset]) \n \n deps = self.exp_strn - self.sim_avg\n resid = np.linalg.norm(deps.ravel())\n print(resid) #,self.its)\n return resid", "def calc_refl(velocity, shotloc_x, shotloc_z, layer_idxs):\n solver_dg = pykonal.EikonalSolver(coord_sys=\"cartesian\")\n solver_dg.vv.min_coords = velocity.min_coords\n solver_dg.vv.node_intervals = velocity.node_intervals\n 
solver_dg.vv.npts = velocity.npts\n solver_dg.vv.values = velocity.values\n\n #shotloc = 2.56 # km\n src_idx = (int((shotloc_x - velocity.min_coords[0])/velocity.node_intervals[0]), int(shotloc_z/velocity.node_intervals[1]), 0)\n solver_dg.tt.values[src_idx] = 0\n solver_dg.unknown[src_idx] = False\n solver_dg.trial.push(*src_idx)\n solver_dg.solve()\n\n solver_ug = pykonal.EikonalSolver(coord_sys=\"cartesian\")\n solver_ug.vv.min_coords = solver_dg.vv.min_coords\n solver_ug.vv.node_intervals = solver_dg.vv.node_intervals\n solver_ug.vv.npts = solver_dg.vv.npts\n solver_ug.vv.values = solver_dg.vv.values\n\n for ix in range(solver_ug.tt.npts[0]):\n #idx = (ix, solver_ug.tt.npts[1]-1, 0)\n idx = (ix, layer_idxs[ix], 0)\n solver_ug.tt.values[idx] = solver_dg.tt.values[idx]\n #print(idx, solver_dg.tt.values[idx])\n solver_ug.unknown[idx] = False\n solver_ug.trial.push(*idx)\n solver_ug.solve()\n \n return solver_ug.tt.values[:,0,0]", "def solve(self):\n \n raise NotImplementedError(\"not implemented!\")", "def solution(self) -> State:", "def solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n locations = student_utils.convert_locations_to_indices(list_of_locations, list_of_locations)\n homes = student_utils.convert_locations_to_indices(list_of_homes, list_of_locations)\n start = list_of_locations.index(starting_car_location)\n\n start_time = time.time()\n\n if params[0] == 'naive':\n car_path, drop_off = naive_solver(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'greedy':\n car_path, drop_off = greedy_solver(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'three_opt':\n car_path, drop_off = three_opt_solver(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'ant_colony':\n car_path, drop_off = ant_colony(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'greedy_clustering_three_opt':\n car_path, drop_off = greedy_clustering_three_opt(locations, homes, start, adjacency_matrix, int(params[1]))\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'mst':\n car_path, drop_off = mst_solver(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'two_opt':\n car_path, drop_off = two_opt_solver(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'greedy_clustering_two_opt':\n car_path, drop_off = greedy_clustering_two_opt(locations, homes, start, adjacency_matrix, int(params[1]))\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n else:\n pass", "def block_solve_newton(\n r_j,\n A_j,\n a_1_j,\n a_2_j,\n m,\n b_j_init,\n C_j,\n I_j,\n ls_alpha=0.5,\n ls_beta=0.9,\n max_iters=20,\n tol=1e-8,\n verbose=False,\n):\n b_j = b_j_init\n k = 1\n pbar_stats = {} # stats for the progress bar\n pbar = tqdm.tqdm(\n desc=\"Solving block with Newton's method\", disable=not verbose, leave=False\n )\n\n while True:\n # First, compute the Newton step and decrement.\n q_b_j = r_j - A_j 
@ b_j\n b_j_norm = b_j.norm(p=2)\n grad_b_j = _grad_j(q_b_j, A_j, b_j, b_j_norm, a_1_j, a_2_j, m)\n hess_b_j = _hess_j(C_j, I_j, b_j, b_j_norm, a_1_j, a_2_j)\n hessinv_b_j = torch.inverse(hess_b_j)\n v_j = hessinv_b_j @ grad_b_j\n dec_j = grad_b_j @ (hessinv_b_j @ grad_b_j)\n\n # Check tolerance stopping criterion. Exit if dec_j / 2 is less than the\n # tolerance.\n if dec_j / 2 <= tol:\n break\n\n # Perform backtracking line search.\n t = 1\n f_b_j = _f_j(q_b_j, b_j_norm, a_1_j, a_2_j, m)\n k_j = grad_b_j @ v_j\n while True:\n # Compute the update and evaluate function at that point.\n bp_j = b_j - t * v_j\n q_bp_j = r_j - A_j @ bp_j\n bp_j_norm = bp_j.norm(p=2)\n f_bp_j = _f_j(q_bp_j, bp_j_norm, a_1_j, a_2_j, m)\n\n if f_bp_j <= f_b_j - ls_alpha * t * k_j:\n b_j = bp_j\n break\n t *= ls_beta\n\n # Make b_j non-zero if it is 0.\n if all(b_j.abs() < tol):\n b_j.fill_(1e-3)\n\n pbar_stats[\"t\"] = \"{:.2g}\".format(t)\n pbar_stats[\"1/2 newton decrement\"] = \"{:.2g}\".format(dec_j / 2)\n pbar.set_postfix(pbar_stats)\n pbar.update()\n\n # Check max iterations stopping criterion.\n if max_iters is not None and k == max_iters and k > 2:\n break\n k += 1\n\n pbar.close()\n return b_j", "def sparse_options(default_solver='spsolve',\n default_least_squares_solver='least_squares_lsmr' if HAVE_SCIPY_LSMR else 'least_squares_generic_lsmr',\n bicgstab_tol=1e-15,\n bicgstab_maxiter=None,\n spilu_drop_tol=1e-4,\n spilu_fill_factor=10,\n spilu_drop_rule='basic,area',\n spilu_permc_spec='COLAMD',\n spsolve_permc_spec='COLAMD',\n spsolve_keep_factorization=True,\n lgmres_tol=1e-5,\n lgmres_maxiter=1000,\n lgmres_inner_m=39,\n lgmres_outer_k=3,\n least_squares_lsmr_damp=0.0,\n least_squares_lsmr_atol=1e-6,\n least_squares_lsmr_btol=1e-6,\n least_squares_lsmr_conlim=1e8,\n least_squares_lsmr_maxiter=None,\n least_squares_lsmr_show=False,\n least_squares_lsqr_damp=0.0,\n least_squares_lsqr_atol=1e-6,\n least_squares_lsqr_btol=1e-6,\n least_squares_lsqr_conlim=1e8,\n least_squares_lsqr_iter_lim=None,\n least_squares_lsqr_show=False,\n pyamg_tol=1e-5,\n pyamg_maxiter=400,\n pyamg_verb=False,\n pyamg_rs_strength=('classical', {'theta': 0.25}),\n pyamg_rs_CF='RS',\n pyamg_rs_presmoother=('gauss_seidel', {'sweep': 'symmetric'}),\n pyamg_rs_postsmoother=('gauss_seidel', {'sweep': 'symmetric'}),\n pyamg_rs_max_levels=10,\n pyamg_rs_max_coarse=500,\n pyamg_rs_coarse_solver='pinv2',\n pyamg_rs_cycle='V',\n pyamg_rs_accel=None,\n pyamg_rs_tol=1e-5,\n pyamg_rs_maxiter=100,\n pyamg_sa_symmetry='hermitian',\n pyamg_sa_strength='symmetric',\n pyamg_sa_aggregate='standard',\n pyamg_sa_smooth=('jacobi', {'omega': 4.0/3.0}),\n pyamg_sa_presmoother=('block_gauss_seidel', {'sweep': 'symmetric'}),\n pyamg_sa_postsmoother=('block_gauss_seidel', {'sweep': 'symmetric'}),\n pyamg_sa_improve_candidates=[('block_gauss_seidel', {'sweep': 'symmetric', 'iterations': 4}), None],\n pyamg_sa_max_levels=10,\n pyamg_sa_max_coarse=500,\n pyamg_sa_diagonal_dominance=False,\n pyamg_sa_coarse_solver='pinv2',\n pyamg_sa_cycle='V',\n pyamg_sa_accel=None,\n pyamg_sa_tol=1e-5,\n pyamg_sa_maxiter=100):\n\n assert default_least_squares_solver.startswith('least_squares')\n\n opts = (('bicgstab_spilu', {'type': 'bicgstab_spilu',\n 'tol': bicgstab_tol,\n 'maxiter': bicgstab_maxiter,\n 'spilu_drop_tol': spilu_drop_tol,\n 'spilu_fill_factor': spilu_fill_factor,\n 'spilu_drop_rule': spilu_drop_rule,\n 'spilu_permc_spec': spilu_permc_spec}),\n ('bicgstab', {'type': 'bicgstab',\n 'tol': bicgstab_tol,\n 'maxiter': bicgstab_maxiter}),\n ('spsolve', {'type': 
'spsolve',\n 'permc_spec': spsolve_permc_spec,\n 'keep_factorization': spsolve_keep_factorization}),\n ('lgmres', {'type': 'lgmres',\n 'tol': lgmres_tol,\n 'maxiter': lgmres_maxiter,\n 'inner_m': lgmres_inner_m,\n 'outer_k': lgmres_outer_k}),\n ('least_squares_lsqr', {'type': 'least_squares_lsqr',\n 'damp': least_squares_lsqr_damp,\n 'atol': least_squares_lsqr_atol,\n 'btol': least_squares_lsqr_btol,\n 'conlim': least_squares_lsqr_conlim,\n 'iter_lim': least_squares_lsqr_iter_lim,\n 'show': least_squares_lsqr_show}))\n\n if HAVE_SCIPY_LSMR:\n opts += (('least_squares_lsmr', {'type': 'least_squares_lsmr',\n 'damp': least_squares_lsmr_damp,\n 'atol': least_squares_lsmr_atol,\n 'btol': least_squares_lsmr_btol,\n 'conlim': least_squares_lsmr_conlim,\n 'maxiter': least_squares_lsmr_maxiter,\n 'show': least_squares_lsmr_show}),)\n\n if HAVE_PYAMG:\n opts += (('pyamg', {'type': 'pyamg',\n 'tol': pyamg_tol,\n 'maxiter': pyamg_maxiter}),\n ('pyamg-rs', {'type': 'pyamg-rs',\n 'strength': pyamg_rs_strength,\n 'CF': pyamg_rs_CF,\n 'presmoother': pyamg_rs_presmoother,\n 'postsmoother': pyamg_rs_postsmoother,\n 'max_levels': pyamg_rs_max_levels,\n 'max_coarse': pyamg_rs_max_coarse,\n 'coarse_solver': pyamg_rs_coarse_solver,\n 'cycle': pyamg_rs_cycle,\n 'accel': pyamg_rs_accel,\n 'tol': pyamg_rs_tol,\n 'maxiter': pyamg_rs_maxiter}),\n ('pyamg-sa', {'type': 'pyamg-sa',\n 'symmetry': pyamg_sa_symmetry,\n 'strength': pyamg_sa_strength,\n 'aggregate': pyamg_sa_aggregate,\n 'smooth': pyamg_sa_smooth,\n 'presmoother': pyamg_sa_presmoother,\n 'postsmoother': pyamg_sa_postsmoother,\n 'improve_candidates': pyamg_sa_improve_candidates,\n 'max_levels': pyamg_sa_max_levels,\n 'max_coarse': pyamg_sa_max_coarse,\n 'diagonal_dominance': pyamg_sa_diagonal_dominance,\n 'coarse_solver': pyamg_sa_coarse_solver,\n 'cycle': pyamg_sa_cycle,\n 'accel': pyamg_sa_accel,\n 'tol': pyamg_sa_tol,\n 'maxiter': pyamg_sa_maxiter}))\n opts = OrderedDict(opts)\n opts.update(genericsolvers.options())\n def_opt = opts.pop(default_solver)\n if default_least_squares_solver != default_solver:\n def_ls_opt = opts.pop(default_least_squares_solver)\n ordered_opts = OrderedDict(((default_solver, def_opt),\n (default_least_squares_solver, def_ls_opt)))\n else:\n ordered_opts = OrderedDict(((default_solver, def_opt),))\n ordered_opts.update(opts)\n return ordered_opts", "def refit_pll_opt(model,data):\n data = toPM(data);\n import scipy.optimize\n from scipy.sparse import triu\n def to_vector(L,h):\n return np.hstack((h,triu(L,k=1).tocoo().data))\n def from_vector(x):\n h = x[:len(model.h)];\n tmp = triu(model.L,k=1).tocoo();\n L = csr((x[len(model.h):],(tmp.row,tmp.col)),shape=model.L.shape)\n return L+L.T,h\n def f0(x0):\n L,h = from_vector(x0)\n return -__pll(L,h,data).mean()\n def jac(x0):\n L,h = from_vector(x0)\n return -to_vector(*__dpll(L,h,data))\n\n x0 = to_vector(model.L,model.h)\n res = scipy.optimize.minimize(f0,x0, method='BFGS',jac=jac)\n #print(\"Success? 
\",res.success)\n model.L,model.h = from_vector(res.x)\n return res", "def _solve(self) -> CasADiArrayType:\n solver_input = {\"x0\": self.x0, \"p\": self.p}\n if self.opt_type in CONSTRAINED_OPT:\n solver_input[\"lbg\"] = self._lbg\n solver_input[\"ubg\"] = self._ubg\n self._solution = self._solver(**solver_input)\n self._stats = self._solver.stats()\n self._stats[\"solution\"] = self._solution\n return self._solution[\"x\"]", "def Solve(self,iter_val=0):\n\n ### Save Files before solve ###\n self.fprint(\"Saving Input Data\",special=\"header\")\n if \"mesh\" in self.params.output:\n self.problem.dom.Save(val=iter_val)\n if \"initial_guess\" in self.params.output:\n self.problem.bd.SaveInitialGuess(val=iter_val)\n if \"height\" in self.params.output and self.problem.dom.dim == 3:\n self.problem.bd.SaveHeight(val=iter_val)\n if \"turbine_force\" in self.params.output:\n self.problem.farm.SaveRotorDisks(val=iter_val)\n self.fprint(\"Finished\",special=\"footer\")\n\n ####################################################################\n ### This is the better way to define a nonlinear problem but it \n ### doesn't play nice with dolfin_adjoint\n # ### Define Jacobian ###\n # dU = TrialFunction(self.problem.fs.W)\n # J = derivative(self.problem.F, self.problem.up_next, dU)\n\n # ### Setup nonlinear solver ###\n # nonlinear_problem = NonlinearVariationalProblem(self.problem.F, self.problem.up_next, self.problem.bd.bcs, J)\n # nonlinear_solver = NonlinearVariationalSolver(nonlinear_problem)\n\n # ### Set some parameters ###\n # solver_parameters = nonlinear_solver.parameters\n # solver_parameters[\"nonlinear_solver\"] = \"snes\"\n # solver_parameters[\"snes_solver\"][\"linear_solver\"] = \"mumps\"\n # solver_parameters[\"snes_solver\"][\"maximum_iterations\"] = 50\n # solver_parameters[\"snes_solver\"][\"error_on_nonconvergence\"] = False\n # solver_parameters[\"snes_solver\"][\"line_search\"] = \"bt\" # Available: basic, bt, cp, l2, nleqerr\n\n ### Solve the problem ###\n # self.fprint(\"Solving\",special=\"header\")\n # start = time.time()\n # iters, converged = nonlinear_solver.solve()\n # stop = time.time()\n # self.fprint(\"Total Nonlinear Iterations: {:d}\".format(iters))\n # self.fprint(\"Converged Successfully: {0}\".format(converged))\n ####################################################################\n\n\n nonlinear_solver = self.params[\"solver\"].get(\"nonlinear_solver\", \"snes\")\n relaxation = self.params[\"solver\"].get(\"newton_relaxation\", 1.0)\n\n self.fprint(\"Solving with {0}\".format(nonlinear_solver))\n if nonlinear_solver == \"newton\":\n self.fprint(\"Relaxation parameter = {: 1.2f}\".format(relaxation))\n\n newton_options = {\"relaxation_parameter\": relaxation,\n \"maximum_iterations\": 40,\n \"linear_solver\": \"mumps\",\n \"absolute_tolerance\": 1e-6,\n \"relative_tolerance\": 1e-5}\n \n solver_parameters = {\"nonlinear_solver\": \"newton\",\n \"newton_solver\": newton_options}\n\n elif nonlinear_solver == \"snes\":\n # ### Add some helper functions to solver options ###\n solver_parameters = {\"nonlinear_solver\": \"snes\",\n \"snes_solver\": {\n \"linear_solver\": \"mumps\", \n \"maximum_iterations\": 40,\n \"error_on_nonconvergence\": True,\n \"line_search\": \"bt\",\n }}\n \n else:\n raise ValueError(\"Unknown nonlinear solver type: {0}\".format(nonlinear_solver))\n\n ### Start the Solve Process ###\n self.fprint(\"Solving\",special=\"header\")\n start = time.time()\n \n # ### Solve the Baseline Problem ###\n # solve(self.problem.F_sans_tf == 0, 
self.problem.up_next, self.problem.bd.bcs, solver_parameters=solver_parameters, **self.extra_kwarg)\n\n # ### Store the Baseline and Assign for the real solve ###\n # self.up_baseline = self.problem.up_next.copy(deepcopy=True)\n # self.problem.up_next.assign(self.up_baseline)\n\n ### Solve the real problem ###\n solve(self.problem.F == 0, self.problem.up_next, self.problem.bd.bcs, solver_parameters=solver_parameters)\n stop = time.time()\n self.fprint(\"Solve Complete: {:1.2f} s\".format(stop-start),special=\"footer\")\n # self.u_next,self.p_next = self.problem.up_next.split(True)\n self.u_next,self.p_next = split(self.problem.up_next)\n # self.nu_T = project(self.problem.nu_T,self.problem.fs.Q,solver_type='mumps',**self.extra_kwarg)\n self.nu_T = None\n\n\n ### Save solutions ###\n if \"solution\" in self.params.output:\n self.fprint(\"Saving Solution\",special=\"header\")\n self.Save(val=iter_val)\n self.fprint(\"Finished\",special=\"footer\")\n\n ### calculate the power for each turbine ###\n ###################################\n ### Fix how angle is transfered ###\n ###################################\n if self.optimizing or self.save_power:\n self.J += -self.CalculatePowerFunctional((iter_val-self.problem.dom.init_wind)) \n\n # self.fprint(\"Speed Percent of Inflow Speed\")\n # ps = []\n # for i in range(6):\n # HH = self.problem.farm.HH[0]\n # RD = self.problem.farm.RD[0]\n # x_val = (i+1)*RD\n # vel = self.problem.up_next([x_val,0,HH])\n # vel = vel[0:3]\n # nom = np.linalg.norm(vel)\n # perc = nom/self.problem.bd.HH_vel\n # ps.append(perc)\n # self.fprint(\"Speed Percent at (\"+repr(int(x_val))+\", 0, \"+repr(HH)+\"): \"+repr(perc))\n # print(ps)", "def solve_puzzle(self):\r\n \r\n counter = 0\r\n rows = self._height-1\r\n cols = self._width-1\r\n # print rows, cols\r\n # print 'The greed has %s rows and %s coloumn indexes' %(rows, cols) \r\n solution_move = ''\r\n if self.get_number(0,0) == 0 and \\\r\n self.get_number(0,1) == 1:\r\n # print 'Congrads Puxxle is Aolved at start!!!!!'\r\n return ''\r\n #appropriate_number = (self._height * self._width) - 1\r\n appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'First appropriate_number=',appropriate_number\r\n # print \"Grid first tile that we will solwing has value =\", self._grid[rows][cols]\r\n \r\n while counter < 300:\r\n counter +=1\r\n # print self\r\n #appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'Appropriate number in loop=',appropriate_number\r\n # print 'We are solving %s index_row and %s index_col' %(rows, cols) \r\n ####Case when we use solve_interior_tile\r\n if rows > 1 and cols > 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n cols -= 1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solving interior tile', (rows, cols)\r\n solution_move += self.solve_interior_tile(rows, cols)\r\n # print 'Solution move=', solution_move\r\n cols -= 1\r\n #### Case when we use solve_col0_tile\r\n elif rows > 1 and cols == 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n cols = self._width-1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solwing tile 0 in row', rows\r\n # print 'Appropriate number here ='\r\n solution_move += self.solve_col0_tile(rows)\r\n # print 'Solution move=', solution_move\r\n rows -=1\r\n cols = self._width-1\r\n\r\n\r\n #### Cases when we use solve_row0_tile\r\n elif rows == 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # 
print 'This tile is already solved!!!'\r\n rows -= 1\r\n #cols = self._width-1\r\n appropriate_number -= self._width\r\n\r\n else:\r\n # print 'Solving upper 2 rows right side'\r\n solution_move += self.solve_row1_tile(cols)\r\n rows -=1\r\n appropriate_number -= self._width\r\n #### Cases when we use solve_row1_tile \r\n if rows < 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows += 1\r\n cols -= 1\r\n appropriate_number +=self._width-1\r\n else:\r\n # print '(1,J) tile solved, lets solwe tile (0,j) in tile',(rows,cols)\r\n # print 'Greed after move solve_row1_tile'\r\n # print self\r\n solution_move += self.solve_row0_tile(cols)\r\n rows +=1\r\n cols -=1\r\n appropriate_number +=self._width-1\r\n\r\n\r\n #### Case when we use solve_2x2\r\n elif rows <= 1 and cols <= 1:\r\n # print 'We are solving 2x2 puzzle'\r\n solution_move += self.solve_2x2()\r\n if self._grid[0][0] == 0 and \\\r\n self._grid[0][1] == 1:\r\n # print 'Congrads Puxxle is SOLVED!!!!!'\r\n break\r\n\r\n\r\n\r\n\r\n if counter > 100:\r\n # print 'COUNTER BREAK'\r\n break\r\n # print solution_move, len(solution_move)\r\n return solution_move\r\n\r\n\r\n\r\n\r\n\r\n\r\n # for row in solution_greed._grid[::-1]:\r\n # print solution_greed._grid\r\n # print 'Row =',row\r\n \r\n # if solution_greed._grid.index(row) > 1:\r\n # print \"Case when we solwing Interior and Tile0 part\"\r\n \r\n\r\n # for col in solution_greed._grid[solution_greed._grid.index(row)][::-1]:\r\n # print 'Coloumn value=', col\r\n #print row[0]\r\n # if col !=row[0]:\r\n # print 'Case when we use just Interior tile solution'\r\n # print solution_greed._grid.index(row)\r\n # print row.index(col)\r\n \r\n # solution += solution_greed.solve_interior_tile(solution_greed._grid.index(row) , row.index(col))\r\n # print 'Solution =', solution\r\n # print self \r\n # print solution_greed._grid\r\n # elif col ==row[0]:\r\n # print 'Case when we use just Col0 solution'\r\n\r\n # else:\r\n # print 'Case when we solwing first two rows'\r\n\r\n #return \"\"\r", "def putsolution(self,whichsol_,skc_,skx_,skn_,xc_,xx_,y_,slc_,suc_,slx_,sux_,snx_):\n if skc_ is not None:\n _skc_tmp = (ctypes.c_int32 * len(skc_))(*skc_)\n else:\n _skc_tmp = None\n if skx_ is not None:\n _skx_tmp = (ctypes.c_int32 * len(skx_))(*skx_)\n else:\n _skx_tmp = None\n if skn_ is not None:\n _skn_tmp = (ctypes.c_int32 * len(skn_))(*skn_)\n else:\n _skn_tmp = None\n if isinstance(xc_, numpy.ndarray) and xc_.dtype is numpy.dtype(numpy.float64) and xc_.flags.contiguous:\n _xc_copyarray = False\n _xc_tmp = ctypes.cast(xc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif xc_ is not None:\n _xc_copyarray = True\n _xc_np_tmp = numpy.zeros(len(xc_),numpy.dtype(numpy.float64))\n _xc_np_tmp[:] = xc_\n assert _xc_np_tmp.flags.contiguous\n _xc_tmp = ctypes.cast(_xc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _xc_copyarray = False\n _xc_tmp = None\n \n if isinstance(xx_, numpy.ndarray) and xx_.dtype is numpy.dtype(numpy.float64) and xx_.flags.contiguous:\n _xx_copyarray = False\n _xx_tmp = ctypes.cast(xx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif xx_ is not None:\n _xx_copyarray = True\n _xx_np_tmp = numpy.zeros(len(xx_),numpy.dtype(numpy.float64))\n _xx_np_tmp[:] = xx_\n assert _xx_np_tmp.flags.contiguous\n _xx_tmp = ctypes.cast(_xx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _xx_copyarray = False\n _xx_tmp = None\n \n if isinstance(y_, numpy.ndarray) 
and y_.dtype is numpy.dtype(numpy.float64) and y_.flags.contiguous:\n _y_copyarray = False\n _y_tmp = ctypes.cast(y_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif y_ is not None:\n _y_copyarray = True\n _y_np_tmp = numpy.zeros(len(y_),numpy.dtype(numpy.float64))\n _y_np_tmp[:] = y_\n assert _y_np_tmp.flags.contiguous\n _y_tmp = ctypes.cast(_y_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _y_copyarray = False\n _y_tmp = None\n \n if isinstance(slc_, numpy.ndarray) and slc_.dtype is numpy.dtype(numpy.float64) and slc_.flags.contiguous:\n _slc_copyarray = False\n _slc_tmp = ctypes.cast(slc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif slc_ is not None:\n _slc_copyarray = True\n _slc_np_tmp = numpy.zeros(len(slc_),numpy.dtype(numpy.float64))\n _slc_np_tmp[:] = slc_\n assert _slc_np_tmp.flags.contiguous\n _slc_tmp = ctypes.cast(_slc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _slc_copyarray = False\n _slc_tmp = None\n \n if isinstance(suc_, numpy.ndarray) and suc_.dtype is numpy.dtype(numpy.float64) and suc_.flags.contiguous:\n _suc_copyarray = False\n _suc_tmp = ctypes.cast(suc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif suc_ is not None:\n _suc_copyarray = True\n _suc_np_tmp = numpy.zeros(len(suc_),numpy.dtype(numpy.float64))\n _suc_np_tmp[:] = suc_\n assert _suc_np_tmp.flags.contiguous\n _suc_tmp = ctypes.cast(_suc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _suc_copyarray = False\n _suc_tmp = None\n \n if isinstance(slx_, numpy.ndarray) and slx_.dtype is numpy.dtype(numpy.float64) and slx_.flags.contiguous:\n _slx_copyarray = False\n _slx_tmp = ctypes.cast(slx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif slx_ is not None:\n _slx_copyarray = True\n _slx_np_tmp = numpy.zeros(len(slx_),numpy.dtype(numpy.float64))\n _slx_np_tmp[:] = slx_\n assert _slx_np_tmp.flags.contiguous\n _slx_tmp = ctypes.cast(_slx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _slx_copyarray = False\n _slx_tmp = None\n \n if isinstance(sux_, numpy.ndarray) and sux_.dtype is numpy.dtype(numpy.float64) and sux_.flags.contiguous:\n _sux_copyarray = False\n _sux_tmp = ctypes.cast(sux_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif sux_ is not None:\n _sux_copyarray = True\n _sux_np_tmp = numpy.zeros(len(sux_),numpy.dtype(numpy.float64))\n _sux_np_tmp[:] = sux_\n assert _sux_np_tmp.flags.contiguous\n _sux_tmp = ctypes.cast(_sux_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _sux_copyarray = False\n _sux_tmp = None\n \n if isinstance(snx_, numpy.ndarray) and snx_.dtype is numpy.dtype(numpy.float64) and snx_.flags.contiguous:\n _snx_copyarray = False\n _snx_tmp = ctypes.cast(snx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif snx_ is not None:\n _snx_copyarray = True\n _snx_np_tmp = numpy.zeros(len(snx_),numpy.dtype(numpy.float64))\n _snx_np_tmp[:] = snx_\n assert _snx_np_tmp.flags.contiguous\n _snx_tmp = ctypes.cast(_snx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _snx_copyarray = False\n _snx_tmp = None\n \n res = __library__.MSK_XX_putsolution(self.__nativep,whichsol_,_skc_tmp,_skx_tmp,_skn_tmp,_xc_tmp,_xx_tmp,_y_tmp,_slc_tmp,_suc_tmp,_slx_tmp,_sux_tmp,_snx_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def solve(self):\n print(\"Attempting to solve problem instance with {} constraints\".format(len(self.constraints)))\n 
self.formulation.solve(solver='SCS')\n print(self.formulation.status)", "def solve(self):\n print(\"Attempting to solve problem instance with {} constraints\".format(len(self.constraints)))\n self.formulation.solve(solver='SCS')\n print(self.formulation.status)", "def solve(self):\n print(\"Attempting to solve problem instance with {} constraints\".format(len(self.constraints)))\n self.formulation.solve(solver='SCS')\n print(self.formulation.status)", "def solve(self):\n raise NotImplementedError(\"This method needs to be implemented.\")", "def solve(self):\n start = timer()\n # encode into milp\n me = MILPEncoder(MILPSolver.prob,\n MILPSolver.params.logger.LOGFILE, \n MILPSolver.params.INTRA_DEP_CONSTRS,\n MILPSolver.params.INTER_DEP_CONSTRS)\n if MILPSolver.lp == True:\n gmodel = me.lp_encode()\n else:\n gmodel = me.encode()\n # Set gurobi parameters\n pgo = 1 if MILPSolver.params.PRINT_GUROBI_OUTPUT == True else 0\n gmodel.setParam('OUTPUT_FLAG', pgo)\n tl = MILPSolver.params.TIMEOUT\n if tl != -1 : gmodel.setParam('TIME_LIMIT', tl)\n if not MILPSolver.params.DEFAULT_CUTS: \n MILPSolver.disable_default_cuts(gmodel)\n gmodel._vars = gmodel.getVars()\n # set callback cuts \n MILPSolver.id_form = IdealFormulation(MILPSolver.prob,\n gmodel, \n MILPSolver.params.IDEAL_FREQ,\n MILPSolver.params.logger.LOGFILE)\n MILPSolver.dep_cuts = DepCuts(MILPSolver.prob,\n gmodel,\n MILPSolver.params.DEP_FREQ,\n MILPSolver.params.INTRA_DEP_CUTS,\n MILPSolver.params.INTER_DEP_CUTS,\n MILPSolver.sip_params,\n MILPSolver.params.logger.LOGFILE)\n # Optimise\n if MILPSolver.params.callback_enabled() and MILPSolver.lp == False:\n gmodel.optimize(MILPSolver._callback)\n else:\n gmodel.optimize()\n\n runtime = timer() - start\n cex = None \n if MILPSolver.status == SolveResult.BRANCH_THRESHOLD:\n result = SolveResult.BRANCH_THRESHOLD\n elif gmodel.status == GRB.OPTIMAL:\n cex_shape = MILPSolver.prob.spec.input_layer.input_shape\n cex = np.zeros(cex_shape)\n for i in itertools.product(*[range(j) for j in cex_shape]):\n cex[i] = MILPSolver.prob.spec.input_layer.out_vars[i].x\n result = SolveResult.UNSATISFIED\n elif gmodel.status == GRB.TIME_LIMIT:\n result = SolveResult.TIMEOUT\n elif gmodel.status == GRB.INTERRUPTED:\n result = SolveResult.INTERRUPTED\n elif gmodel.status == GRB.INFEASIBLE or gmodel.status == GRB.INF_OR_UNBD:\n result = SolveResult.SATISFIED\n else:\n result = SolveResult.UNKNOWN\n \n # MILPSolver.logger.info('Verification problem {} solved, '\n # 'LP: {}, '\n # 'time: {:.2f}, '\n # 'result: {}.'\n # .format(MILPSolver.prob.id,\n # MILPSolver.lp,\n # runtime,\n # result.value))\n \n return SolveReport(result, runtime, cex)", "def compute(self):\n\n self.setd = []\n self.satc = [False for cl in self.soft] # satisfied clauses\n self.solution = None\n self.bb_assumps = [] # backbone assumptions\n self.ss_assumps = [] # satisfied soft clause assumptions\n\n if self.oracle.solve():\n # hard part is satisfiable => there is a solution\n self._filter_satisfied(update_setd=True)\n self._compute()\n\n self.solution = list(map(lambda i: i + 1, filter(lambda i: not self.satc[i], range(len(self.soft)))))\n\n return self.solution", "def solve(self, regparam):\n self.regparam = regparam\n \n #Some counters for bookkeeping\n self.stepcounter = 0\n self.flipcounter = 0\n self.nochangecounter = 0\n \n #Cached results\n self.evals = np.multiply(self.svals, self.svals)\n self.newevals = 1. 
/ (self.evals + self.regparam)\n newevalslamtilde = np.multiply(self.evals, self.newevals)\n self.D = np.sqrt(newevalslamtilde)\n #self.D = -newevalslamtilde\n \n self.VTY = self.svecs.T * self.Y\n DVTY = np.multiply(self.D.T, self.svecs.T * self.Y)\n \n #Using lists in order to avoid unnecessary matrix slicings\n self.DVTY_list = []\n self.YTVDDVTY_list = []\n self.classFitnessList = []\n for i in range(self.labelcount):\n DVTY_i = DVTY[:,i]\n self.DVTY_list.append(DVTY_i)\n YTVDDVTY_i = DVTY_i.T * DVTY_i\n self.YTVDDVTY_list.append(YTVDDVTY_i)\n fitness_i = self.size - DVTY_i.T * DVTY_i\n self.classFitnessList.append(fitness_i)\n \n self.Dsvecs_list = []\n self.svecsDDsvecs_list = []\n for i in range(self.size):\n Dsvec = np.multiply(self.D.T, self.svecs[i].T)\n self.Dsvecs_list.append(Dsvec)\n self.svecsDDsvecs_list.append(Dsvec.T*Dsvec)\n \n self.updateA()\n \n \n converged = False\n print(self.classcounts.T)\n if self.callbackfun is not None:\n self.callbackfun.callback(self)\n while True:\n \n converged = self.roundRobin()\n print(self.classcounts.T)\n if self.callbackfun is not None:\n self.callbackfun.callback(self)\n if converged: break\n \n if self.oneclass:\n self.Y = self.Y[:, 0]\n self.A = self.A[:, 0]\n self.results['predicted_clusters_for_training_data'] = self.Y\n self.predictor = self.svdad.createModel(self)", "def solute(self, puzzle):\r\n \"\"\"suppose that ax = c, where a is a matrix, c and x are vectors.\"\"\"\r\n \"\"\"The aim is to figure out x, which indicates the solution.\"\"\"\r\n A, a, c = [], [], []\r\n for i in range(puzzle.row):\r\n for j in range(puzzle.column):\r\n # create a puzzle.row * puzzle.column by puzzle.row * puzzle.column matrix.\r\n # each column represents a cell in the puzzle.\r\n # each row represents the changed cell if column c is selected.\r\n if puzzle.lights[i][j] == -1:\r\n c.append(1)\r\n else:\r\n c.append(0)\r\n for m in range(puzzle.row):\r\n for n in range(puzzle.column):\r\n if self.is_adjecent([m, n], [i, j]):\r\n # if [m, n] is adjecent to [i, j], then a[ij][mn] should be 1.\r\n a.append(1)\r\n else:\r\n a.append(0)\r\n a.append(c[i * puzzle.column + j])\r\n A.append(a)\r\n a = []\r\n\r\n self.eliminate(A)\r\n x = [item[len(item) - 1] for item in A]\r\n # x is the last column of A.\r\n # if x[i] is 1, cell i should be selected.\r\n i = 0\r\n for m in range(puzzle.row):\r\n for n in range(puzzle.column):\r\n if x[i] == 1:\r\n puzzle.selection.add((m, n))\r\n i += 1\r\n\r\n return puzzle.selection", "def __compute_s_block(self, s_block, available_pins, all_pins):\n for pin in all_pins:\n # make sure pins only get connected once\n # and pins connected to simulators get skipped\n if (\n pin._isconnected(include_simulators=False)\n and pin in available_pins\n and pin._connection in available_pins\n ):\n # the pin indices in available_pins lines up with the row/column\n # indices in the matrix. 
as the matrix shrinks, we remove pins\n # from available_pins so the indices always line up\n k = available_pins.index(pin)\n l = available_pins.index(pin._connection)\n\n s_block = innerconnect_s(s_block, k, l)\n\n available_pins.remove(pin)\n available_pins.remove(pin._connection)\n return s_block", "def form(func, dist_list, init_search_point, alg):\n \n def SLSQP(func, dist_list, init_search_point):\n \n dim = len(dist_list)\n current_beta = 0\n new_beta = 1\n sig = np.empty((1, dim))\n mu = np.empty((1, dim))\n new_search_point = np.array(init_search_point).reshape((1, dim))\n \n def f_l(x_l):\n return(func([x_l[i,:]*sig[0,i] + mu[0,i] for i in range(0, dim)]))\n \n while abs(current_beta-new_beta) > 0.001:\n current_search_point = new_search_point\n current_beta = new_beta\n for i in range(0, dim):\n if dist_list[i][1] != 'norm':\n mu[0,i], sig[0, i] = Rosenblatt_Transform(dist_list[i][0], current_search_point[0,i])\n else:\n mu[0,i], sig[0, i] = dist_list[i][0].mean(), dist_list[i][0].std()\n \n dist_fun = lambda u: np.linalg.norm(u) \n \n alg = 'SLSQP'\n \n H = lambda u: f_l(u)\n cons = ({'type': 'eq', 'fun': lambda u: -(H(u.reshape(-1,1)))})\n \n result = scipy.optimize.minimize(dist_fun, x0 = current_search_point, constraints = cons, method=alg)\n \n new_beta = result.fun\n u = np.array(result.x).reshape((1,dim))\n \n new_search_point = np.empty((1, dim))\n for i in range(0, dim):\n new_search_point[0,i] = mu[0,i] + u[0,i]*sig[0,i]\n \n beta_value = new_beta \n p_f = sst.norm.cdf(-beta_value)\n iterations = result.nit\n u = result.x\n x = u[:]*sig[0,:] + mu[0,:]\n print(x)\n grad_val = scipy.optimize.approx_fprime(x, func, 0.00000001)\n grad_val = grad_val.reshape((1, dim))\n \n sum1 = np.sum((grad_val[0,:]**2)*(sig[0,:]**2))\n cosines = np.empty((1, dim))\n \n for i in range(0, dim):\n cosines[0,i] = grad_val[0,i]*sig[0,i]/np.sqrt(sum1) \n \n print('------------------------')\n print('First-Order Reliability Analysis')\n print('Algorithm: slsqp solver')\n print('Iterations: {}\\nReliability index = {}\\nProbability of failure = {}'.format(iterations, beta_value, p_f))\n print('------------------------')\n \n return(beta_value, p_f, x, u, mu, sig, cosines) \n \n def HL_R(func, dist_list, init_search_point):\n \n iterations = 0\n cur_beta = 3\n new_beta = 0\n dim = len(dist_list)\n global_mean_arr = np.empty((1, dim))\n global_std_arr = np.empty((1, dim))\n new_search_point = np.array(init_search_point).reshape((1, dim))\n \n while abs(cur_beta - new_beta) > 0.001:\n cur_beta = new_beta\n cur_cosines = np.zeros((1, dim))\n new_cosines = np.ones((1, dim))\n \n while max((abs(cur_cosines - new_cosines))[0]) > 0.005:\n \n cur_cosines = new_cosines\n \n cur_search_point = new_search_point\n \n for i in range(0, dim):\n if dist_list[i][1] != 'norm':\n global_mean_arr[0, i], global_std_arr[0, i] = Rosenblatt_Transform(dist_list[i][0], cur_search_point[0,i])\n else:\n global_mean_arr[0, i], global_std_arr[0, i] = dist_list[i][0].mean(), dist_list[i][0].std()\n \n \n grad_val = scipy.optimize.approx_fprime(cur_search_point[0], func, 0.00000001)\n grad_val = grad_val.reshape((1, dim))\n \n sum1 = np.sum((grad_val[0,:]**2)*(global_std_arr[0,:]**2))\n cosines = np.empty((1, dim))\n \n for i in range(0, dim):\n cosines[0,i] = grad_val[0,i]*global_std_arr[0,i]/np.sqrt(sum1)\n \n new_cosines = cosines\n new_search_point = np.empty((1, dim))\n for i in range(0, dim):\n new_search_point[0,i] = global_mean_arr[0,i] - new_cosines[0,i]*global_std_arr[0,i]*cur_beta\n \n iterations = iterations + 1\n 
\n \n B = Symbol('B')\n coordinates = []\n for i in range(0, dim):\n coordinates.append(global_mean_arr[0, i] - new_cosines[0,i]*global_std_arr[0, i]*B)\n new_beta = float(solve(func(coordinates), B)[0])\n \n cosines = new_cosines \n beta_value = new_beta\n p_f = sst.norm.cdf(-new_beta)\n x = new_search_point\n u = (x[0,:] - global_mean_arr[0,:])/global_std_arr\n \n print('-------------------------')\n print('First-Order Reliability Analysis')\n print('Algorithm: HL-R solver')\n print('Iterations: {}\\nReliability index = {}\\nProbability of failure = {}'.format(iterations, beta_value, p_f))\n print('-------------------------')\n \n return(beta_value, p_f, x, u, global_mean_arr, global_std_arr, cosines)\n \n def HL_RF(func, dist_list, init_search_point):\n\n cur_beta = 3\n new_beta = 0\n dim = len(dist_list)\n\n new_search_point = np.array(init_search_point).reshape((1, dim))\n iterations = 0\n while abs(cur_beta - new_beta) > 0.001 and abs(func(new_search_point[0])) > 0.001:\n global_mean_arr = np.empty((1, dim))\n global_std_arr = np.empty((1, dim))\n cur_beta = new_beta\n cur_search_point = new_search_point\n \n for i in range(0, dim):\n if dist_list[i][1] != 'norm':\n global_mean_arr[0,i], global_std_arr[0, i] = Rosenblatt_Transform(dist_list[i][0], cur_search_point[0,i])\n else:\n global_mean_arr[0,i], global_std_arr[0, i] = dist_list[i][0].mean(), dist_list[i][0].std()\n \n f_val = func(cur_search_point[0])\n \n x_ast = np.empty((1, dim))\n for i in range(0, dim):\n x_ast[0,i] =(cur_search_point[0,i] - global_mean_arr[0,i])/global_std_arr[0,i]\n\n grad_val = scipy.optimize.approx_fprime(cur_search_point[0], func, 0.000001)\n grad_val = grad_val.reshape((1, dim)) \n \n grad_val_ast = np.empty(grad_val.shape)\n for i in range(0, dim):\n grad_val_ast[0,i] = grad_val[0,i]*global_std_arr[0,i]\n \n t1 = 1/np.sum(grad_val_ast[0,:]**2)\n\n t2 = sum(grad_val_ast[0,:]*x_ast[0,:]) - f_val\n \n t3 = t1*t2\n \n new_x_ast = np.empty(x_ast.shape)\n for i in range(0, dim):\n new_x_ast[0,i] = t3*grad_val_ast[0,i]\n u = new_x_ast\n new_beta = np.linalg.norm(new_x_ast)\n \n new_search_point = np.empty((1, dim))\n for i in range(0, dim):\n new_search_point[0,i] = new_x_ast[0,i]*global_std_arr[0,i] + global_mean_arr[0,i]\n iterations = iterations + 1\n \n grad_val_ast_sum = sum(grad_val_ast[0,:]**2)\n cosines = grad_val_ast/(grad_val_ast_sum**0.5)\n beta_value = new_beta\n x = new_search_point\n p_f = sst.norm.cdf(-beta_value)\n \n print('-------------------------')\n print('First-Order Reliability Analysis')\n print('Algorithm: HL-RF solver')\n print('Iterations: {}\\nReliability index = {}\\nProbability of failure = {}'.format(iterations, beta_value, p_f))\n print('-------------------------')\n \n return(beta_value, p_f, x, u, global_mean_arr, global_std_arr, cosines)\n \n if alg == 'slsqp':\n return(SLSQP(func, dist_list, init_search_point))\n elif alg == 'HL-R':\n return(HL_R(func, dist_list, init_search_point))\n elif alg == 'HL-RF':\n return(HL_RF(func, dist_list, init_search_point))", "def solve(self):\n initial_fes = eades(self.graph, self.force_forward_edges)\n initial_fes_vec = self.edge_vector(initial_fes)\n\n # bounds for the objective\n lower_bound = 0\n upper_bound = np.sum(initial_fes_vec @ self.weights)\n\n self.logger.info('Calculating FES for graph with %d edges, max %d feedback edges', self.m, len(initial_fes))\n\n simple_cycles = set(induced_cycles(self.graph, initial_fes))\n\n for iteration in itertools.count(1):\n self.logger.info('Baharev iteration %d, %g <= objective <= %g, %d 
simple cycles', iteration, lower_bound,\n upper_bound, len(simple_cycles))\n\n # Formulate and solve the problem for this iteration:\n y = cp.Variable(self.m, boolean=True, name=\"y\")\n objective = cp.Minimize(cp.sum(y @ self.weights))\n\n cycle_vectors = [self.edge_vector(nx.utils.pairwise(cycle)) for cycle in simple_cycles]\n constraints = [cp.sum(a @ y) >= 1 for a in cycle_vectors]\n constraints.append(cp.sum(y @ self.force_forward_vec) == 0) # no force forward vec may be in the result set\n problem = cp.Problem(objective, constraints)\n resolution = problem.solve(**self.solver_args)\n if problem.status != 'optimal':\n self.logger.warning('Optimization solution is %s. Try solver != %s?', problem.status,\n problem.solver_stats.solver_name)\n self.logger.debug(\n \"Solved optimization problem with %d constraints: %s -> %s (%g + %g seconds, %d iterations, solver %s)\",\n len(constraints), resolution, problem.solution.status,\n problem.solver_stats.solve_time or 0, problem.solver_stats.setup_time or 0,\n problem.solver_stats.num_iters or 0, problem.solver_stats.solver_name)\n current_solution = np.abs(y.value) >= 0.5 # y.value = vector of floats each ≈ 0 or 1\n current_fes = self.edges_for_vector(current_solution)\n self.logger.debug('Iteration %d, resolution: %s, %d feedback edges', iteration, resolution,\n len(current_fes))\n # S, the feedback edge set calculated using the constraint subset, can be an incomplete solution\n # (i.e. cycles remain after removing S from the graph). So lets compare this with the upper bound\n # from the heuristic\n lower_bound = max(lower_bound, objective.value)\n if lower_bound == upper_bound:\n self.logger.info('upper == lower bound == %g, optimal solution found', lower_bound)\n break # y.value is the optimal solution\n\n if resolution > upper_bound:\n self.logger.error('Solution %g > upper bound %g!', resolution, upper_bound)\n break\n\n Gi = self.graph.copy()\n Gi.remove_edges_from(current_fes)\n if nx.is_directed_acyclic_graph(Gi):\n self.logger.info('Graph is acyclic, optimal solution found')\n break # y.value is the optimal solution\n\n # The solution is not yet ideal. 
So we take G^(i), the graph still containing some feedback edges,\n # calculate a heuristic on it and use the heuristic (= over-estimation) to adjust upper bound and\n # determine additional simple cycles (= constraints)\n Fi = eades(Gi, self.force_forward_edges)\n yi = self.edge_vector(Fi) | current_solution\n zi = np.sum(yi @ self.weights)\n if zi < upper_bound:\n upper_bound = zi\n current_solution = yi\n simple_cycles |= set(induced_cycles(Gi, Fi))\n\n self.solution_vector = current_solution\n self.solution = self.edges_for_vector(current_solution)\n self.objective = objective.value\n self.iterations = iteration\n self.simple_cycles = simple_cycles\n return self.solution", "def __init__(self, \n nd = 2, \n goal = np.array([1.0,1.0]),\n state_bound = [[0,1],[0,1]],\n nA = 4,\n action_list = [[0,1],[0,-1],[1,0],[-1,0]],\n<<<<<<< HEAD:archive-code/puddleworld.py\n ngrid = [10.0,10.0],\n maxStep = 40):\n ngrid = [40, 40]\n x_vec = np.linspace(0,1,ngrid[0])\n y_vec = np.linspace(0,1,ngrid[1])\n for x in x_vec:\n for y in y_vec:\n if ~self.inPuddle([x,y]):\n puddle.append([x,y])\n # puddle is a closed loop \n outpuddlepts = np.asarray(puddle)\n \"\"\"\n\n\n # Horizontal wing of puddle consists of \n # 1) rectangle area xch1<= x <=xc2 && ych1-radius <= y <=ych2+radius\n # (xchi,ychi) is the center points (h ==> horizantal)\n # x, y = state[0], state[1]\n xch1, ych1 = 0.3, 0.7\n xch2, ych2 = 0.65, ych1\n radius = 0.1\n\n\n #Vertical wing of puddle consists of \n # 1) rectangle area xcv1-radius<= x <=xcv2+radius && ycv1 <= y <= ycv2\n # where (xcvi,ycvi) is the center points (v ==> vertical)\n xcv1 = 0.45; ycv1=0.4;\n xcv2 = xcv1; ycv2 = 0.8;\n\n # % 2) two half-circle at end edges of rectangle\n \n # POINTS ON HORIZANTAL LINES OF PUDDLE BOUNDARY\n for x in np.arange(xch1,xcv1-radius,self.meshsize[0]/2):\n puddle.append([x,ych1-radius])\n puddle.append([xcv1-radius,ych1-radius])\n \n for x in np.arange(xcv1+radius,xch2,self.meshsize[0]/2):\n puddle.append([x,ych1-radius])\n \n for x in np.arange(xch1,xcv1-radius,self.meshsize[0]/2):\n puddle.append([x,ych1+radius])\n \n puddle.append([xcv1-radius,ych1+radius])\n\n\n for x in np.arange(xcv1+radius,xch2,self.meshsize[0]/2):\n puddle.append([x,ych1+radius])\n\n # POINTS ON VERTICAL LINES OF PUDDLE BOUNDARY\n for y in np.arange(ycv1,ych1-radius,self.meshsize[1]/2):\n puddle.append([xcv1-radius,y])\n \n for y in np.arange(ycv1,ych1-radius,self.meshsize[1]/2):\n puddle.append([xcv1+radius,y])\n \"\"\"\n for y in np.arrange():\n puddle.append([])\n \n for y in np.arrange():\n puddle.append([])\n \"\"\"\n\n # HALF CIRCLES\n ngridTheta = 10\n thetaVec = np.linspace(0,pi,ngridTheta)\n\n for t in thetaVec:\n puddle.append([xch1+radius*np.cos(pi/2+t),ych1+radius*np.sin(pi/2+t)])\n\n for t in thetaVec:\n puddle.append([xch2+radius*np.cos(-pi/2+t),ych2+radius*np.sin(-pi/2+t)])\n\n for t in thetaVec:\n puddle.append([xcv1+radius*np.cos(pi+t),ycv1+radius*np.sin(pi+t)])\n\n for t in thetaVec:\n puddle.append([xcv2+radius*np.cos(t),ycv2+radius*np.sin(t)])\n\n \n outpuddlepts = np.asarray(puddle)\n return outpuddlepts", "def smooth_input(xs, ys, L):\n n = len(xs)\n\n # obj = [1 for i in range(n)]\n # for i in range(2 * n):\n # obj.append(0)\n\n # Create the model\n model = LpProblem(name=\"small-problem\", sense=LpMinimize)\n ws = [LpVariable(name=\"w_{}\".format(i), lowBound=0, upBound=1) for i in range(n)]\n ls = [LpVariable(name=\"L_{}\".format(i), lowBound=0) for i in range(n)]\n zs = [LpVariable(name=\"z_{}\".format(i)) for i in range(n)]\n\n # objective\n 
model += lpSum(ws)\n\n # constraint 1:\n # sum of Li <= L\n model += (lpSum(ls) <= L * n, \"sum of Li <= L\")\n\n # Constraint 2:\n # w_i >= |z_i - y_i|\n for i in range(n):\n model += (ws[i] + zs[i] >= ys[i], \"C2.a_{}\".format(i))\n model += (ws[i] - zs[i] >= -ys[i], \"C2.b_{}\".format(i))\n\n # Constraint 3\n # |z_i - z_j| <= L_i * dist(x_i, x_j)\n for i in range(n):\n for j in range(n):\n if i != j:\n model += (zs[i] - zs[j] - abs(xs[i] - xs[j]) * ls[i] <= 0, \"C3.a_{}_{}\".format(i, j))\n model += (zs[j] - zs[i] - abs(xs[i] - xs[j]) * ls[i] <= 0, \"C3.b_{}_{}\".format(i, j))\n\n if model.solve() == 1:\n print(\n \"------------------------------------\\nFound solution for the linear program\\n------------------------------------\\n\")\n return [[xs[i], zs[i].value()] for i in range(n)]\n # return [zi.value() for zi in zs], [li.value() for li in ls]\n\n print(\"Linear program: no solution found\")\n exit(1)\n return -1", "def set_up_linear_system(self):\r\n \r\n import numpy as np\r\n import copy\r\n \r\n # Find all elements which require the solver\r\n # First, find all elements which are either Line Sinks, Doublets, or Inhomogeneities\r\n part_of_solver = [(isinstance(e, ElementHeadBoundary) or isinstance(e, ElementNoFlowBoundary) or isinstance(e, ElementInhomogeneity)) for e in self.elementlist]\r\n # Only keep the elements which must be part of the linear system...\r\n part_of_solver = [idx for idx,val in enumerate(part_of_solver) if val]\r\n # ...and prepare a second set of indices for its complement\r\n not_part_of_solver = [i for i in np.arange(len(self.elementlist)) if i not in part_of_solver]\r\n \r\n # These elements invariably consist of segments - find out how many there are in total\r\n num_segments = np.sum([self.elementlist[idx].segments for idx in part_of_solver])\r\n \r\n # =====================================================================\r\n # Now create the matrix\r\n # =====================================================================\r\n \r\n # Pre-allocate arrays for the linear solver\r\n matrix = np.zeros((num_segments,num_segments))\r\n \r\n # The counter will keep track at what row we are\r\n row = 0\r\n \r\n # Go through all elements\r\n for i in part_of_solver:\r\n \r\n # Find the corresponding element\r\n e = self.elementlist[i]\r\n \r\n # We need a second counter for the columns\r\n col = 0\r\n \r\n # e is the element we are currently looking at - the row -, now we \r\n # must go through all other elements which are part of the solver\r\n # and check what they contribute to the control points of this element\r\n for i2 in part_of_solver:\r\n \r\n # Find the corresponding element\r\n e2 = self.elementlist[i2]\r\n \r\n # If the row element is a HeadLineSink, we must extract potentials\r\n if isinstance(e, ElementHeadBoundary):\r\n \r\n # Evaluate the contributions of this element to the control points\r\n if e != e2:\r\n block = e2.evaluate(\r\n z = e.zc,\r\n detailed = True,\r\n override_parameters = True).T\r\n else:\r\n block = e2.evaluate(\r\n z = e.zc,\r\n detailed = True,\r\n override_parameters = True,\r\n evaluate_self = True).T\r\n \r\n \r\n elif isinstance(e, ElementNoFlowBoundary):\r\n \r\n # Evaluate the contributions of this element to the control points\r\n block = e2.evaluate_gradient(\r\n z = e.zc,\r\n detailed = True,\r\n derivatives = 'phi',\r\n override_parameters = True).T\r\n \r\n # Project the partial derivatives onto the normal vector\r\n # The projection is a->b = <a,b>/||b||^2*b\r\n # Let's try it with the inner product 
instead\r\n # The normal vector is already normalized\r\n \r\n # We should have as many normal vectors as we have control points\r\n # Go through them all, and project each gradient onto the normal vector\r\n for idx,nv in enumerate(e.segment_nvec):\r\n \r\n # Calculate the inner product between the returned partial\r\n # derivatives and the segment's normal vector\r\n block[idx,:] = np.inner(\r\n np.column_stack(( \r\n np.real(block[idx,:]),\r\n np.imag(block[idx,:]) )),\r\n np.asarray([np.real(nv),np.imag(nv)]).T )[:,0]\r\n \r\n elif isinstance(e, ElementInhomogeneity):\r\n \r\n # If this inhomogeneity evaluates itself\r\n if i == i2:\r\n \r\n # Retrieve own matrix contribution\r\n block = copy.copy(e2.block)\r\n \r\n # This contribution is incomplete, subtract A_star from\r\n # its diagonal\r\n \r\n # Prepare a vector of outside conductivities; all are\r\n # the background conductivity by default\r\n for e3 in self.elementlist:\r\n if isinstance(e3, ElementMoebiusBase) or isinstance(e3, ElementUniformBase):\r\n A_star = np.ones(e2.zc.shape)*e3.k/(e2.k - e3.k)\r\n \r\n # Get add matrix\r\n addmat = np.identity(block.shape[0])\r\n np.fill_diagonal(addmat,A_star)\r\n \r\n # Subtract it from the retrieved block\r\n block -= addmat\r\n \r\n else:\r\n \r\n # Evaluate the contributions of this element to the control points\r\n block = e2.evaluate(\r\n z = e.zc,\r\n detailed = True,\r\n override_parameters = True).T\r\n \r\n # Write this block into the matrix\r\n matrix[row:row+e.segments,col:col+e2.segments] = copy.copy(np.real(block))\r\n \r\n # Update the column counter\r\n col += e2.segments\r\n \r\n # Update the row counter\r\n row += e.segments\r\n \r\n # =====================================================================\r\n # Now create the solution_vector\r\n # =====================================================================\r\n \r\n # Pre-allocate spac efor the solution vector\r\n solution_vector = np.zeros(num_segments)\r\n \r\n # The counter will keep track at what row we are\r\n counter = 0\r\n \r\n # Go through all elements\r\n for i in part_of_solver:\r\n \r\n # Find the corresponding element\r\n e = self.elementlist[i]\r\n \r\n # If the element is a HeadLineSink, we must assign the difference\r\n # between the head target and the background contributions\r\n if isinstance(e, ElementHeadBoundary):\r\n \r\n # Step 1: Assign the head targets\r\n solution_vector[counter:counter+e.segments] = \\\r\n copy.copy(e.phi_target)\r\n # solution_vector[counter:counter+e.segments] = \\\r\n # copy.copy(e.head_target)\r\n \r\n # # Step 2: Background potential --------------------------------\r\n # solution_vector[counter:counter+e.segments] -= \\\r\n # np.real(self.evaluate(e.zc))\r\n \r\n # Step 3: All elements ----------------------------------------\r\n for idx in not_part_of_solver:\r\n solution_vector[counter:counter+e.segments] -= \\\r\n np.real(self.elementlist[idx].evaluate(e.zc))\r\n \r\n # If the element is a no-flow boundary, we must assign the difference\r\n # between the head target and the background contributions\r\n if isinstance(e, ElementNoFlowBoundary):\r\n \r\n # # Step 1: Background gradient ---------------------------------\r\n # temp = self.evaluate_gradient(e.zc,derivatives='phi')\r\n \r\n # Step 2: Gradients from all elements -------------------------\r\n temp = np.zeros(e.zc.shape,dtype=np.complex)\r\n for idx in not_part_of_solver:\r\n temp += \\\r\n self.elementlist[idx].evaluate_gradient(e.zc,derivatives='phi')\r\n \r\n # Step 3: Project gradients onto normal 
vector ----------------\r\n for ix,nv in enumerate(e.segment_nvec):\r\n solution_vector[counter+ix] = \\\r\n -np.inner(\r\n np.asarray([np.real(nv),np.imag(nv)])[:,0],\r\n np.asarray([np.real(temp[ix]),np.imag(temp[ix])]) )\r\n \r\n # If the element is an Inhomogeneity, we must simply assign the potentials\r\n # induced by other elements\r\n if isinstance(e, ElementInhomogeneity):\r\n \r\n # # Step 1: Background potential --------------------------------\r\n # solution_vector[counter:counter+e.segments] -= \\\r\n # np.real(self.evaluate(e.zc))\r\n \r\n # Step 2: All elements ----------------------------------------\r\n for idx in not_part_of_solver:\r\n solution_vector[counter:counter+e.segments] -= \\\r\n np.real(self.elementlist[idx].evaluate(e.zc))\r\n \r\n # Update the counter\r\n counter += e.segments\r\n \r\n self.matrix = matrix\r\n self.solvec = solution_vector\r\n \r\n return matrix, solution_vector", "def actualSolve(self, lp):\n\t\tif lp.isMIP() and self.mip: return self.solve_CBC(lp)\n\t\telse: return self.solve_CLP(lp)", "def _get_solution(self, x_0, sol, k_fb, k_fb_perf_0, sol_verbose=False,\n crashed=False, feas_tol=1e-6, q_0=None, k_fb_0=None):\n\n success = True\n feasible = True\n if crashed:\n feasible = False\n\n if self.verbosity > 1:\n print(\"Optimization crashed, infeasible soluion!\")\n else:\n g_res = np.array(sol[\"g\"]).squeeze()\n\n # This is not sufficient, since casadi gives out wrong feasibility values\n if np.any(np.array(self.lbg) - feas_tol > g_res) or np.any(\n np.array(self.ubg) + feas_tol < g_res):\n feasible = False\n\n x_opt = sol[\"x\"]\n self.has_openloop = True\n\n if self.opt_x0:\n x_0 = x_opt[:self.n_s]\n x_opt = x_opt[self.n_s:, :]\n\n # get indices of the respective variables\n n_u_0 = self.n_u\n n_u_perf = 0\n if self.n_perf > 1:\n n_u_perf = (self.n_perf - self.r) * self.n_u\n n_k_ff = (self.n_safe - 1) * self.n_u\n\n c = 0\n idx_u_0 = np.arange(n_u_0)\n c += n_u_0\n idx_u_perf = np.arange(c, c + n_u_perf)\n c += n_u_perf\n idx_k_ff = np.arange(c, c + n_k_ff)\n c += n_k_ff\n\n u_apply = np.array(cas_reshape(x_opt[idx_u_0], (1, self.n_u)))\n k_ff_perf = np.array(\n cas_reshape(x_opt[idx_u_perf], (self.n_perf - self.r, self.n_u)))\n\n k_ff_safe = np.array(\n cas_reshape(x_opt[idx_k_ff], (self.n_safe - 1, self.n_u)))\n k_ff_safe_all = np.vstack((u_apply, k_ff_safe))\n\n k_fb_safe_output = array_of_vec_to_array_of_mat(np.copy(k_fb), self.n_u,\n self.n_s)\n\n p_safe, q_safe, gp_sigma_pred_safe_all = self.get_safety_trajectory_openloop(x_0, u_apply,\n np.copy(k_fb),\n k_ff_safe, q_0, k_fb_0)\n\n p_safe = np.array(p_safe)\n q_safe = np.array(q_safe)\n\n if self.verbosity > 1:\n print(\"=== Safe Trajectory: ===\")\n print(\"Centers:\")\n print(p_safe)\n print(\"Shape matrices:\")\n print(q_safe)\n print(\"Safety controls:\")\n print(u_apply)\n print(k_ff_safe)\n\n k_fb_perf_traj_eval = np.empty((0, self.n_s * self.n_u))\n k_ff_perf_traj_eval = np.empty((0, self.n_u))\n if self.n_safe > 1:\n k_fb_perf_traj_eval = np.vstack(\n (k_fb_perf_traj_eval, k_fb[:self.r - 1, :]))\n k_ff_perf_traj_eval = np.vstack(\n (k_ff_perf_traj_eval, k_ff_safe[:self.r - 1, :]))\n if self.n_perf > self.r:\n k_fb_perf_traj_eval = np.vstack((k_fb_perf_traj_eval,\n np.matlib.repmat(k_fb_perf_0,\n self.n_perf - self.r,\n 1)))\n k_ff_perf_traj_eval = np.vstack((k_ff_perf_traj_eval, k_ff_perf))\n\n if self.n_perf > 1:\n mu_perf, sigma_perf = self._f_multistep_perf_eval(x_0.squeeze(),\n u_apply,\n k_fb_perf_traj_eval,\n k_ff_perf_traj_eval)\n\n if self.verbosity > 1:\n print(\"=== 
Performance Trajectory: ===\")\n print(\"Mu perf:\")\n print(mu_perf)\n print(\"Peformance controls:\")\n print(k_ff_perf_traj_eval)\n\n feasible, _ = self.eval_safety_constraints(p_safe, q_safe)\n\n if self.rhc and feasible:\n self.k_ff_safe = k_ff_safe\n self.k_ff_perf = k_ff_perf\n self.p_safe = p_safe\n self.k_fb_safe_all = np.copy(k_fb)\n self.u_apply = u_apply\n self.k_fb_perf_0 = k_fb_perf_0\n\n if feasible:\n self.n_fail = 0\n\n if not feasible:\n self.n_fail += 1\n q_all = None\n k_fb_safe_output = None\n k_ff_all = None\n p_safe = None\n q_safe = None\n g_res = None\n\n if self.n_fail >= self.n_safe:\n # Too many infeasible solutions -> switch to safe controller\n if self.verbosity > 1:\n print(\n \"Infeasible solution. Too many infeasible solutions, switching to safe controller\")\n u_apply = self.safe_policy(x_0)\n k_ff_safe_all = u_apply\n else:\n # can apply previous solution\n if self.verbosity > 1:\n print((\n \"Infeasible solution. Switching to previous solution, n_fail = {}, n_safe = {}\".format(\n self.n_fail, self.n_safe)))\n if sol_verbose:\n u_apply, k_fb_safe_output, k_ff_safe_all, p_safe = self.get_old_solution(\n x_0, get_ctrl_traj=True)\n else:\n u_apply = self.get_old_solution(x_0)\n k_ff_safe_all = u_apply\n\n if sol_verbose:\n return x_0, u_apply, feasible, success, k_fb_safe_output, k_ff_safe_all, p_safe, q_safe, sol, gp_sigma_pred_safe_all\n\n return x_0, u_apply, success", "def solve(self, use_cache=True):\n if self.parallel:\n self.solve_all_parallel(use_cache)\n else:\n self.solve_all(use_cache)", "def LTD_SolveCase(mirror=None):\n if mirror == None:\n flatStart = 0\n else:\n flatStart = 0 # never flat start ( could be changed to solnType options ) or reorder?\n if mirror.debug: print('flat start = %d' % flatStart)\n\n soln_start = time.time()\n errorCode = PSLF.SolveCase(\n 25, # maxIterations, Solpar.Itnrmx\n 0, \t# iterationsBeforeVarLimits, Solpar.Itnrvl\n 0,\t# flatStart, \n 1,\t# tapAdjustment, Solpar.Tapadj 1\n 1,\t# switchedShuntAdjustment, Solpar.Swsadj 1\n 1,\t# phaseShifterAdjustment, Solpar.Psadj 1\n 0,\t# gcdAdjustment, probably Solpar.GcdFlag 0\n 0,\t# areaInterchangeAdjustment, \n 1,\t# solnType, 1 == full, 2 == DC, 3 == decoupled \n 0, # reorder (in dypar default = 0)\n )\n soln_end = time.time()\n\n #handle timing \n if mirror:\n mirror.PFTime += (soln_end - soln_start)\n mirror.PFSolns += 1\n if mirror.debug: print('Power Flow Solution returns: %d' % errorCode)\n\n if errorCode == -1:\n '''Solution did not converge'''\n raise ValueError('*** PSLF power-flow solution did not converge.')\n return\n if errorCode == -2:\n '''Maximum iterations hit'''\n raise ValueError('*** PSLF power-flow solution stopped due to maximum number of iterations.')\n return\n\n #converged\n return", "def solution(data):\n lines = preprocess(data)\n solver = Code(lines)\n return solver.solve()" ]
[ "0.6437674", "0.63575566", "0.63160455", "0.6253777", "0.6242397", "0.61891377", "0.6080901", "0.6075275", "0.60319287", "0.6008029", "0.5985438", "0.5956081", "0.5951685", "0.5910511", "0.5889266", "0.588267", "0.5834767", "0.58335656", "0.58294004", "0.5818063", "0.5789487", "0.5777485", "0.5777485", "0.57704216", "0.57280904", "0.571287", "0.57128173", "0.57099754", "0.57089484", "0.5702032", "0.57010525", "0.5696889", "0.56777626", "0.566149", "0.56482714", "0.5619358", "0.5609419", "0.5608507", "0.5594447", "0.55859536", "0.5585356", "0.55742884", "0.5572157", "0.55398196", "0.55332124", "0.5525419", "0.552506", "0.552367", "0.5510882", "0.5510882", "0.55092674", "0.5501275", "0.5500613", "0.5490207", "0.548965", "0.5465869", "0.54639244", "0.5458224", "0.5455949", "0.54550517", "0.545214", "0.54416364", "0.5437795", "0.54342735", "0.5428875", "0.54272485", "0.5424262", "0.5423574", "0.54207575", "0.5416649", "0.54095036", "0.54089326", "0.5408309", "0.54047215", "0.5403294", "0.5395282", "0.5379545", "0.53731763", "0.53730303", "0.5369547", "0.5368237", "0.53651583", "0.53651583", "0.53651583", "0.5356395", "0.5349778", "0.5340509", "0.5339183", "0.53303146", "0.5321686", "0.5321561", "0.53161776", "0.5315009", "0.5313509", "0.53050447", "0.5302042", "0.5301201", "0.52995425", "0.5291201", "0.5287753" ]
0.66944
0
The argument p is assumed to be some permutation of 0, 1, ..., len(p) - 1. Returns an array s, where s[i] gives the index of i in p.
def invert_permutation(p): s = np.empty_like(p) s[p] = np.arange(p.size) return s
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def perm_invert(p):\n q = [None] * len(p)\n for i, j in enumerate(p):\n q[j] = i\n return q", "def sample_from(self, p):\n return np.searchsorted(np.cumsum(p), np.random.rand())", "def permute_2d(m, p):\r\n return m[p][:, p]\r\n # unused below\r\n m_t = transpose(m)\r\n r_t = take(m_t, p, axis=0)\r\n return take(transpose(r_t), p, axis=0)", "def perm_conjugate(p, s):\n q = [None] * len(p)\n for i in range(len(p)):\n q[s[i]] = s[p[i]]\n return q", "def gen_rand_index(p, n):\n # TODO Check args here\n \n # TODO: check each value of inverse distribution is\n # different\n invcdf = N.cumsum(p)\n uni = rand(n)\n index = N.zeros(n, dtype=int)\n\n # This one should be a bit faster\n for k in range(len(p)-1, 0, -1):\n blop = N.where(N.logical_and(invcdf[k-1] <= uni, \n uni < invcdf[k]))\n index[blop] = k\n \n return index", "def sample(a, p):\n if (len(a) != len(p)):\n raise Exception('a != p')\n p = np.array(p)\n p = p / p.sum()\n r = random.random()\n n = len(a)\n total = 0 # range: [0,1]\n for i in xrange(n):\n total += p[i]\n if total > r:\n return a[i]\n return a[i]", "def pseudorandom(n, p, key):\n import numpy as np\n p = list(p)\n cp = np.cumsum([0] + p)\n assert np.allclose(1, cp[-1])\n assert len(p) < 256\n\n x = np.random.RandomState(key).random_sample(n)\n out = np.empty(n, dtype='i1')\n\n for i, (low, high) in enumerate(zip(cp[:-1], cp[1:])):\n out[(x >= low) & (x < high)] = i\n return out", "def pflip(P):\n if len(P) == 1:\n return 0\n\n P /= sum(P)\n\n assert math.fabs(1.0-sum(P)) < 10.0**(-10.0)\n\n p_minus = 0\n r = np.random.rand()\n for i in range(len(P)):\n P[i] += p_minus\n p_minus = P[i]\n if r < p_minus:\n return i\n\n raise IndexError(\"pflip:failed to find index\")", "def lift_perm(p: Dict[int, int]) -> np.ndarray:\n n = len(p)\n pm = np.zeros((1 << n, 1 << n), dtype=complex)\n for i in range(1 << n):\n j = 0\n mask = 1 << n\n for q in range(n):\n mask >>= 1\n if (i & mask) != 0:\n j |= 1 << (n - 1 - p[q])\n pm[j][i] = 1\n return pm", "def index_to_feature(p, dims):\n feature = []\n for dim in dims:\n feature.append(p % dim)\n p //= dim\n return feature", "def permute(seq, permutation):\n return [seq[i] for i in permutation]", "def naive(p, t):\n\toccurence = []\n\tfor i in range(len(t)-len(p) + 1):\n\t\tmatch = True\n\t\tfor j in range(len(p)):\n\t\t\tif not p[j] == t[i+j]:\n\t\t\t\tmatch = False\n\t\t\t\tbreak\n\t\tif match:\n\t\t\toccurence.append(i)\n\treturn occurence", "def decoder(permutation):\n depermutation = []\n for x in range (0, len (permutation)):\n depermutation.append (permutation.index(x))\n return depermutation", "def point_location(tri, p): \n simplex_index = tri.find_simplex(p)\n bc = []\n for id_, point in zip(simplex_index, p):\n # Calculate the two first barycentric coordinates for the relevant\n # simplex\n b = tri.transform[id_, :2].dot(point-tri.transform[id_, 2])\n bc.append(np.c_[np.atleast_2d(b), 1-b.sum()])\n # Create the full array and squeeze the shit out of it\n bc = np.array(bc).squeeze()\n return simplex_index, bc", "def naive(p, t):\n occurences = []\n for i in range(len(t) - len(p) + 1):\n match = True\n for j in range(len(p)):\n if t[i + j] != p[j]:\n match = False\n break\n if match:\n occurences.append(i)\n return occurences", "def discrete_rv(p):\n u = np.random.uniform()\n cdf = np.cumsum(p)\n j = np.searchsorted(cdf, u)\n return j", "def _permutation_to_vertex(self, p):\n return (\n tuple(p._labels[0]),tuple(p._labels[1]),\n tuple(p._twin[0]),tuple(p._twin[1]))", "def values(self, ps):\n ps = np.asarray(ps)\n if np.any(ps < 0) 
or np.any(ps > 1):\n raise ValueError('Probability p must be in range [0, 1]')\n\n index = np.searchsorted(self.ps, ps, side='left')\n return self.xs[index]", "def pentagonal_index(P):\n return (1 + sqrt(1 + 24 * P)) / 6", "def cycles(p: List[int]) -> List[Set[int]]:\n validate_permutation(p)\n\n todo = list(range(len(p)))\n cycles = []\n\n while todo:\n start = todo.pop(0)\n\n cycle = (start,)\n position = p[start]\n\n while position != start:\n todo.remove(position)\n cycle += (position, )\n position = p[position]\n\n cycles.append(cycle)\n\n return cycles", "def permute(self, arr):\n\n return arr[self.permutation_idxs]", "def permutation(s):\n if len(s) == 1:\n return [s]\n result = []\n first = s[0]\n ss = s[1:]\n pers = permutation(ss)\n for p in pers:\n for i in range(0,len(p)):\n result.append(p[:i]+first+p[i:])\n return result", "def _permutation_to_vertex(self, p):\n return (tuple(p._labels[0]),tuple(p._labels[1]),\n tuple(p._twin[0]), tuple(p._twin[1]),\n tuple(p._flips[0]), tuple(p._flips[1]))", "def sample(x, p=None):\n s = np.random.random_sample()\n if p is None:\n return x[int(s*len(x))]\n else:\n p = np.cumsum(p)\n p = p / float(p[-1])\n return x[sum(s >= p)]", "def _P(m):\n P = np.zeros((m**2,m**2), dtype=np.int64)\n for i in range(1, m**2 + 1):\n j = 1 + m*((i - 1) % m) + (i - 1)//m\n P[i-1, j-1] = 1\n return P", "def get_array_index_permutations(param):\n indices = list()\n\n try:\n for d in reversed(param.get(\"dimensions\")):\n i = list()\n for x in range(0, d.get(\"len\")):\n i.append(x)\n indices.append(i)\n\n array_dereferences = list(itertools.product(*indices))\n return array_dereferences\n\n except TypeError:\n return list()", "def _get_sample(self, p: float) -> np.ndarray:\n return np.where(self.rand_array >= p, 0, 1)", "def decode_from_P(P):\n N = P.shape[0]\n A = P.shape[1]\n \n X = np.arange(N)\n \n for i in range(N):\n max_val = -1e100\n for a in range(A):\n if P[i,a] > max_val:\n max_val = P[i,a]\n X[i] = a\n \n return X", "def p2vertices(self, p):\n h = self.top\n verts = np.empty((self.nparams + 2, 2))\n verts[:, 0] = self._modelx\n verts[:, 1] = np.concatenate([[h], p, [h]])\n return verts", "def perm_vs_hyp():\n\n return [\"P\",\"P\",\"P\",\"P\",\"P\"]", "def permute(p, dims, perm):\n if issparse(p):\n return _permute_sparse(p, dims, perm)\n return _permute_dense(p, dims, perm)", "def mat24_perm_to_int(p):\n oct = sum(1 << x for x in p[:8])\n res = gc.vect_to_octad(oct) \n #print(\"p2i oct\", hex(oct))\n res -= STD_OCTAD\n res += (res >> 12) & 759 \n #print(\"p2i\", res)\n p1 = [24]*32\n oct, j = 8 * oct, 0x00 \n for i in range(24):\n o = oct & 8\n p1[i] = (j >> o) & 0x1f\n j += 1 << o\n oct >>= 1\n q, q_inv = [None]*8, [None]*8\n for i in range(8):\n j = p1[p[i] & 0x1f] & 7\n q[j] = i\n q_inv[i] = j\n for i in range(6):\n # exchange place i with place q_inv[i]\n j = q_inv[i]\n #q_inv[q[i]], q_inv[q[j]] = q_inv[q[j]], q_inv[q[i]]\n #q[i], q[j] = q[j], q[i]\n #assert q[:i] == q_inv[:i] == lrange(i)\n q_inv[q[i]] = q_inv[q[j]]\n q[j] = q[i]\n #print(\"p2i%d\" % i, j-i) \n res = res * (8 - i) + j - i\n #print(\"p2ifinal\", p1[p[8] & 0x1f]) \n return 16 * res + p1[p[8] & 0x1f]", "def short_perm_to_inv_seq(perm):\n\tperm = list(perm)\n\tinv_seq = [0 for _ in xrange(len(perm))]\n\tfor i in xrange(len(perm)):\n\t\tfor j in xrange(i + 1, len(perm)):\n\t\t\tif perm[i] > perm[j]:\n\t\t\t\tinv_seq[perm[j] - 1] += 1\n\treturn inv_seq", "def perp_vector(p, q, r):\n v = cross(q - r, q - p)\n return v / mod(v) + q", "def mod_inv(a,p):\r\n\r\n for i in range(1,p):\r\n 
if (i*a)%p==1: return i\r\n raise ValueError(str(a)+\" has no inverse mod \"+str(p))", "def validate_permutation(p):\n if not isinstance(p, list):\n raise ValueError(\"A permutation should be a list of integers\")\n\n for i in p:\n if not isinstance(i, int):\n raise ValueError(\"A permutation should be a list of integers\")\n\n if set(p) != set(range(len(p))):\n raise ValueError(\"A permutation should only contain each position exactly once\")", "def orientation_function(a,b,p):\r\n if (p == 0):\r\n return True\r\n else: \r\n v = np.zeros((len(a),), dtype = int)\r\n for i in range(len(a)):\r\n for j in range(len(b)):\r\n if (eq_elements(a[i],b[j]) == True):\r\n v[j] = i\r\n P = Permutation(v)\r\n return P.is_even", "def next_permutation(P):\n n = len(P)\n\n # Find the first index with the bigger neighbour.\n i = _first_index_with_bigger_neighbour(P)\n\n # If this is the first, where i=0, then there is no next permutation.\n if i == 0:\n return P\n\n # From the right, find a value in P that is smaller than\n # the previous found value.\n j = n - 1\n while P[j] <= P[i-1]:\n j -= 1\n\n # Swap the values\n P[i-1], P[j] = P[j], P[i-1]\n\n # Restore the tail of the permutation.\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n\n return P", "def _pfunc(i,j,perm):\n if perm[i-1] == j:\n return 1\n else:\n return 0", "def permutation_stefan(self, nums):\n perms = [[]]\n # for n in nums:\n # print(f'n = {n}')\n # new_perms = []\n # for p in perms:\n # print(f'p = {p}, (p+[n]).index(n) = {(p+[n]).index(n)}')\n # for i in range((p+[n]).index(n)+1):\n # tmp = p[:i] + [n] + p[i:]\n # print(f'tmp = {tmp}')\n # new_perms.append(tmp)\n # perms = new_perms\n # print(f'perms = {perms}')\n for n in nums:\n perms = [p[:i]+[n]+p[i:] for p in perms for i in range((p+[n]).index(n)+1)]\n return perms", "def permutation_helper(random_state, n, shape):\r\n # n should be a 0-dimension array\r\n assert n.shape == ()\r\n # Note that it is important to convert `n` into an integer, because if it\r\n # is a long, the numpy permutation function will crash on Windows.\r\n n = int(n.item())\r\n\r\n if shape is None:\r\n # Draw only one permutation, equivalent to shape = ()\r\n shape = ()\r\n out_shape = list(shape)\r\n out_shape.append(n)\r\n out = numpy.empty(out_shape, int)\r\n for i in numpy.ndindex(*shape):\r\n out[i] = random_state.permutation(n)\r\n\r\n #print 'RETURNING', out.shape\r\n return out", "def naive_2mm(p, t):\n\toccurence = []\n\tfor i in range(len(t)-len(p) + 1):\n\t\tmatch = True\n\t\tunmatch = 0\n\t\tfor j in range(len(p)):\n\t\t\tif not p[j] == t[i+j]:\n\t\t\t\tunmatch += 1\n\t\t\t\tif unmatch > 2:\n\t\t\t\t\tmatch = False\n\t\t\t\t\tbreak\n\t\tif match:\n\t\t\toccurence.append(i)\n\treturn occurence", "def _precompute_substrings(self, p: int) -> List[int]:\n hash_vals: List[int] = [0]\n for i in range(len(self._s)):\n val = (hash_vals[i] * self.X + ord(self._s[i])) % p\n hash_vals.append(val)\n\n return hash_vals", "def i_to_p(i):\n return [bit_component(i,j) for j in (0,1,2)]", "def p_simplex(self, p):\r\n return list(filter(lambda face: (len(face) == p+1) , self.faces()))", "def _choose_random_segment(patients, size=None, segment_p=None):\n num_segments_per_patient = np.array([signal.shape[0] for _, (signal, _) in patients])\n first_segment_index_by_patient = np.cumsum(num_segments_per_patient) - num_segments_per_patient\n num_segments = num_segments_per_patient.sum()\n if segment_p is None:\n p = np.ones(num_segments) / num_segments\n else:\n patient_index, segment_index, 
segment_prob = segment_p\n p_index = first_segment_index_by_patient[patient_index] + segment_index\n if num_segments <= p_index < 0:\n raise ValueError('The provided patient and segment indices are invalid')\n if 1. < segment_prob < 0.:\n raise ValueError('Probability must lie in the [0, 1] interval')\n p = (1 - segment_prob) * np.ones(num_segments) / (num_segments - 1)\n p[p_index] = segment_prob\n segment_ids = np.random.choice(num_segments, size=size, p=p)\n if size is None:\n patient_index = np.searchsorted(first_segment_index_by_patient, segment_ids, side='right') - 1\n segment_index = segment_ids - first_segment_index_by_patient[patient_index]\n return patient_index, segment_index\n else:\n indices = []\n for segment_id in segment_ids:\n patient_index = np.searchsorted(first_segment_index_by_patient, segment_id, side='right') - 1\n segment_index = segment_id - first_segment_index_by_patient[patient_index]\n indices.append((patient_index, segment_index))\n return indices", "def random_choice(p, size):\n k = p.shape[-1]\n\n if p.ndim > 1:\n # If p is an nd-array, the last axis is interpreted as the class\n # probability. We must iterate over the elements of all the other\n # dimensions.\n # We first ensure that p is broadcasted to the output's shape\n size = to_tuple(size) + (1,)\n p = np.broadcast_arrays(p, np.empty(size))[0]\n out_shape = p.shape[:-1]\n # np.random.choice accepts 1D p arrays, so we semiflatten p to\n # iterate calls using the last axis as the category probabilities\n p = np.reshape(p, (-1, p.shape[-1]))\n samples = np.array([np.random.choice(k, p=p_) for p_ in p])\n # We reshape to the desired output shape\n samples = np.reshape(samples, out_shape)\n else:\n samples = np.random.choice(k, p=p, size=size)\n return samples", "def _get_permutated_segments_indices(\n self, randomized: bool, random_state: Optional[np.random.mtrand.RandomState]\n ) -> np.ndarray:\n idx = np.arange(self.dy.size)\n\n if randomized:\n if random_state is None:\n random_state = np.random.RandomState()\n idx = random_state.permutation(idx)\n return idx", "def projective_point(p):\n from sage.rings.integer import GCD_list, LCM_list\n try:\n p_gcd = GCD_list([x.numerator() for x in p])\n p_lcm = LCM_list([x.denominator() for x in p])\n except AttributeError:\n return p\n scale = p_lcm / p_gcd\n return [scale * x for x in p]", "def permutation(self, x):\r\n x = array(x)\r\n x = roll(x, self.num_calls)\r\n self.num_calls += 1\r\n return x", "def permutation(a):\n rs = _generator.get_random_state()\n return rs.permutation(a)", "def long_perm_to_inv_seq(perm):\n\tinversions = cl.defaultdict(int)\n\tin_order = mergesort(list(perm), inversions)\n\treturn [inversions[i] for i in xrange(1, len(perm) + 1)]", "def nextPermutation(self, nums: List[int]) -> None:\n if not nums: #general basecase\n return\n right = len(nums) - 1 #start at far end of the array\n for i in range(len(nums) - 1, 0, -1): #continue until you are at 1st index\n if nums[i - 1] < nums[i]: \n right = i\n break\n if i - 1 == 0:\n return self.reverse_nums(i - 1, len(nums) -1, nums) #just reverse and return\n pivot = right - 1 #this is where array will be pivoted\n succesor = 0 \n for i in range(len(nums) -1, pivot, -1): #go up until pivot\n if nums[i] > nums[pivot]:\n succesor = i \n break\n nums[pivot], nums[succesor] = nums[succesor], nums[pivot] #swap pivot and successor\n self.reverse_nums(right, len(nums) - 1, nums) # from right put the rest of the array in ascending order", "def _re(self, p):\n return self.edges[:, 0, :] - p # 0 is 
arbitrary - the other end also works", "def print_permutation(p: List[int]):\n validate_permutation(p)\n\n C = cycles(p)\n s = ''\n for cycle in C:\n cyclestr = '('\n for el in cycle:\n cyclestr += str(el) + ','\n s += cyclestr[:len(cyclestr) - 1] + ')'\n if s == '':\n s = '()'\n print(s)", "def data_gen(size, p):\n #print(np.random.get_state()[1][0])\n random_table = np.random.binomial(size = size, p = p, n = 1)\n test_array = np.zeros((size, 2), dtype = int)\n for i in range(size):\n test_array[i,0] = i\n test_array[i,1] = random_table[i]\n return test_array", "def uniform_but_one_dataset(n, p):\n elements = []\n for i in range(n):\n elements.append((i, 1))\n elements.append((1, (n**(1.0 / p)) - 1))\n return elements", "def short_inv_seq_to_perm(inv_seq):\n\t# O(n^2), but not as much bookkeeping\n\tperm = [0 for _ in xrange(len(inv_seq))]\n\tfor j, inv_count in enumerate(inv_seq):\n\t\ti, num_zeros = 0, 0\n\t\tplaced = False\n\t\twhile not placed:\n\t\t\tif perm[i] != 0:\n\t\t\t\ti += 1\n\t\t\telif num_zeros < inv_count:\n\t\t\t\tnum_zeros += 1\n\t\t\t\ti += 1\n\t\t\telse:\n\t\t\t\tperm[i] = j + 1\n\t\t\t\tplaced = True\t\t\n\treturn Permutation(perm)", "def get_sorted_pandigital(m):\n perms = get_all_permutations(range(m,0,-1))\n\n for perm in perms: \n # per is a m-length tuple\n perm = list2num(perm)\n yield perm", "def inverse_permutation(permutation):\n arange = torch.arange(permutation.size(-1), device=permutation.device)\n res = torch.zeros_like(permutation).scatter_(-1, permutation, arange.expand_as(permutation))\n return res", "def p(self) -> np.ndarray:\n return self._vector[18:20]", "def tuple_permutation(v,P):\r\n u = []\r\n w = list(v)[:]\r\n test = True\r\n for i in range(len(v)):\r\n if ((isinstance(v[i], int) == True) or (isinstance(v[i], str) == True)):\r\n if (v[i] in P):\r\n w[i] = P(v[i])\r\n else:\r\n u.append(tuple_permutation(tuple(v[i]),P))\r\n test = False\r\n if (test == True):\r\n return tuple(w)\r\n else:\r\n return tuple(u)", "def __getHints(self, p):\n st = bisect.bisect_left(self.index, (p[:self.ln], -1)) # binary search\n en = bisect.bisect_right(self.index, (p[:self.ln], sys.maxsize)) # binary search\n hits = self.index[st:en] # this range of elements corresponds to the hits\n return [h[1] for h in hits] # return just the offsets", "def get_random_perm(n):\n import random\n x = range(n) # Time complexity is O(n)\n for i in xrange(len(x)):\n j = random.randint(i, len(x) - 1) # Time complexity is O(logn)\n swap(x, i, j)\n return x", "def random_state(N, p):\n m = int(N * p)\n s = np.concatenate([np.ones(m), np.ones(N-m) * -1]).astype(np.int8)\n np.random.shuffle(s)\n return s", "def permute(p,l,length):\n assert length >= 0\n if length == 0:\n\tprint p\n\treturn\n\n for i in range(0,length):\n\tn = p + (l[i],) \n\tpermute(n,l[0:i]+l[i+1:],length-1)", "def vec_repeat_at_start(x, p):\n n = x.shape[0]\n indices = (jnp.arange(p) + n - p) % n\n padding = x[indices]\n return jnp.concatenate((padding, x))", "def create_dataset_splits(n, p=1.0):\n\tperm = np.random.permutation(n).tolist()\n\tidx = int(p * n)\n\treturn perm[:idx]", "def convert2int(self,seq_pep):\n\t\treturn [self.aminoacids.index(pep) for pep in seq_pep]", "def compute_lps_array(pattern):\n lps = [0 for _ in pattern]\n j = 0\n i = 1\n while i < len(pattern):\n if pattern[i] == pattern[j]:\n lps[i] = j + 1\n j += 1\n i += 1\n elif j != 0:\n j = lps[j-1]\n else:\n lps[i] = 0\n i += 1\n return lps", "def GetRandomInd(Prob):\n if USELIB:\n return proteinlib.getrandomind(Prob, random.random())\n else:\n 
p = Prob[0]\n i = 0\n r = random.random()\n n = len(Prob)\n while i < n - 1 and p < r:\n i += 1\n p += Prob[i]\n return i", "def polyder_vec(p, m):\n factorial = np.math.factorial\n m = np.asarray(m, dtype=int) # order of derivative\n p = np.atleast_2d(p)\n order = p.shape[1] - 1\n\n D = np.arange(order, -1, -1)\n num = np.array([factorial(i) for i in D], dtype=object)\n den = np.array([factorial(max(i - m, 0)) for i in D], dtype=object)\n D = (num // den).astype(p.dtype)\n\n p = np.roll(D * p, m, axis=1)\n idx = np.arange(p.shape[1])\n p = np.where(idx < m, 0, p)\n\n return p", "def get_p_states(m):\n p_lst = list()\n for i in range(m):\n p_state = input(\"p[{}] = \".format(i+1)) != \"0\"\n p_lst.append(p_state)\n return p_lst", "def josephus(self, m, n, s = 1):\n from collections import deque\n m -= 1\n if s <= 0:\n s = 1\n Q = deque(range(n))\n perm = []\n while len(Q) > s:\n for dp in xrange(m):\n Q.append(Q.popleft())\n perm.append(Q.popleft())\n perm.extend(list(Q))\n return Permutation(perm)", "def y_from_p(p):\n\ty = -np.log(p)\n\treturn y", "def beta_gen_posmnt(p):\n return np.array([0.0]*int(0.7*p) + [1.0]*(p-int(0.7*p)))", "def nextPermutation(self, nums: List[int]) -> None:\n p = len(nums) - 1\n index = -1\n \n while p > 0:\n if nums[p] > nums[p-1]:\n index = p-1\n break\n p -= 1\n \n if index == -1:\n nums.reverse()\n else:\n for i in range(len(nums)-1, p-1,-1):\n if nums[i] > nums[index]:\n swapindice = i\n break\n \n nums[index], nums[swapindice] = nums[swapindice], nums[index]\n nums[p:] = nums[p:][::-1]", "def test_permutation(n: int) -> List[int]:\n permutation = [i + 1 for i in range(n)]\n cycle_length = 2\n cycle_index = 0\n\n while cycle_length + cycle_index < n:\n permutation[cycle_index + cycle_length - 1] = cycle_index\n cycle_index += cycle_length\n cycle_length += 1\n\n if n > 0:\n permutation[n - 1] = cycle_index\n\n return permutation", "def inv_gauss_int(p):\n #Brute force approach. Limited accuracy for >3sigma\n #find something better \n #DO NOT USE IN LOOPS (very slow)\n if p<0. 
or p>1.:\n print('Wrong value for p(',p,')!')\n sys.exit()\n step=.00001\n xn=arange(0.,4.+step,step)\n gn=1./sqrt(2.*pi)*exp(-xn**2/2.)\n cgn=add.accumulate(gn)*step\n p=p/2.\n ind=searchsorted(cgn,p)\n return xn[ind]", "def vec_repeat_at_end(x, p):\n n = x.shape[0]\n indices = jnp.arange(p) % n\n padding = x[indices]\n return jnp.concatenate((x, padding))", "def _get_perm_phase(order, phase):\n n_points = len(order)\n return phase ** sum(\n 1 for i in range(n_points) for j in range(i + 1, n_points)\n if order[i] > order[j]\n )", "def gini_scorer(a,p):\n\n return gini_normalized(a,p[:,1])", "def _permute_dense(p, dims, perm):\n p, perm = np.asarray(p), np.asarray(perm)\n d = prod(dims)\n\n if isop(p):\n return (\n p.reshape([*dims, *dims])\n .transpose([*perm, *(perm + len(dims))])\n .reshape([d, d])\n )\n\n return p.reshape(dims).transpose(perm).reshape([d, 1])", "def find_permutation(fig):\n permutation = zip([next(x for x in xs if isinstance(x, int)) for xs in fig], count(1))\n permutation.sort()\n return [x for _, x in permutation]", "def sign(p):\n to_count = filter(lambda x: x[0] > x[1], combinations(p, 2))\n sign_exp = sum(1 for _ in to_count) % 2\n return (-1)**sign_exp", "def p_to_color_seq(p):\n cmap = cm.get_cmap('Reds')\n# help(cmap)\n return cmap(p)", "def p_to_prediction(self, p):\n if isinstance(p, list):\n return [self.p_to_prediction(_p) for _p in p]\n elif isinstance(p, Normal):\n pred = p.loc.cpu().detach().numpy()\n elif isinstance(p, Bernoulli):\n pred = p.probs.cpu().detach().numpy()\n else:\n raise NotImplementedError\n return pred", "def permutor(text, permutation):\n scrambled = \"\"\n for x in permutation:\n scrambled = scrambled + text[x]\n return scrambled", "def prime_array(number_of_primes) -> array:\n p = array('i',list(primes(number_of_primes)))\n return p", "def permutation_in_simplex_test(vec, P):\r\n s = P_chains([],[])\r\n if (vec.dic != {}):\r\n v = list(vec.dic.keys())\r\n p = len(v[0]) - 1\r\n faces = []\r\n values = []\r\n for a in v:\r\n if (isinstance(a, int) == True): \r\n return vec\r\n else:\r\n w = tuple_permutation(a,P)\r\n w1 = tuple_sorted(w)\r\n if (orientation_function(w1,w,p) == True):\r\n faces.append(tuple(w1))\r\n values.append(vec.dic[a])\r\n else:\r\n faces.append(tuple(w1))\r\n values.append((-1)*vec.dic[a])\r\n s = P_chains(faces,values)\r\n return s\r\n else:\r\n return s", "def viterbi_sparse(p_emit: Sequence[np.ndarray], p_trans: Sequence[np.ndarray]) -> np.ndarray:\n\n T = len(p_emit)\n\n assert T - 1 == len(p_trans)\n\n trellis = [p_emit[0]]\n states = [None]\n\n for t in range(1, T):\n weighted_scores = trellis[-1][:, None] + p_trans[t - 1] # [x, y] # scores and p_trans broadcasted\n max_scores = np.amax(weighted_scores, axis=0) # [y]\n trellis.append(np.add(max_scores, p_emit[t])) # [y] remember highest score of each path\n states.append(np.argmax(weighted_scores, axis=0)) # [y] remember index of best path\n\n assert len(trellis) == T and len(states) == T\n\n tokens = [None] * T # [T]\n tokens[-1] = np.argmax(trellis[-1], axis=0) # []\n\n for t in range(T - 1, 0, -1):\n tokens[t - 1] = states[t][tokens[t]] # []\n\n return tokens", "def p2_pits(self):\n return self.state[self.M + 1:-1]", "def get_pent_idx(pent):\n pidx = 0\n for i in range(pent.shape[0]):\n for j in range(pent.shape[1]):\n if pent[i][j] != 0:\n pidx = pent[i][j]\n break\n if pidx != 0:\n break\n if pidx == 0:\n return -1\n return pidx - 1", "def duplicate_in_array(s):\r\n import collections\r\n c = collections.Counter(s)\r\n for x in s:\r\n if c[x]>1:\r\n 
return s.index(x)\r\n return -1", "def nextPermutation(self, nums: List[int]) -> None:\n pass", "def find(self, p):\n\n # Find the root of the component/set\n root = p\n while root != self.id[root]:\n root = self.id[root]\n\n # Compress the path leading back to the root\n # This operation is called \"path compression\" and is\n # waht gives the amortized constant time complexity.\n while p != root:\n next = self.id[p]\n self.id[p] = root\n p = next\n\n return root", "def simplex_proj_mat(P):\n P_proj = np.zeros_like(P)\n for i in range(P.shape[0]):\n P_proj[i,:] = simplex_proj(P[i,:])\n \n return P_proj", "def partition_random(A, p, r):\n n = random.randint(p, r)\n A[n], A[r] = A[r], A[n]\n x = A[r]\n i = p-1\n for j in range(p, r):\n if A[j] <= x:\n i += 1\n A[i], A[j] = A[j], A[i]\n A[i+1], A[r] = A[r], A[i+1]\n return i+1", "def makePermutations(n):\n\thalf = n // 2\n\tfull = half * 2\n\tswap = np.random.rand(half) > 0.5\n\tpx = np.arange(n)\n\tpx[:full:2] += swap\n\tpx[1:full:2] -= swap\n\treturn px", "def a_ij(s, p, i=1, j=1): # (Validated)\n from math import sqrt\n if i == j:\n return s.c[i]['a'] # Return pure paramater\n else: # find mixture aij i =/= j\n return (1 - p.m['k'][i][j]) * sqrt(s.c[i]['a'] * s.c[j]['a'])" ]
[ "0.6614275", "0.5957587", "0.58996975", "0.5898854", "0.58252364", "0.574952", "0.57436204", "0.5726088", "0.57004726", "0.5664967", "0.5657052", "0.56268764", "0.55960995", "0.55443096", "0.55430853", "0.5538524", "0.550933", "0.55085063", "0.5503721", "0.5452929", "0.543458", "0.5420169", "0.54064", "0.54035187", "0.54008234", "0.53937685", "0.5385929", "0.5383989", "0.53809196", "0.53575355", "0.5340586", "0.5330597", "0.53258884", "0.5294583", "0.527425", "0.5264803", "0.5263943", "0.5257962", "0.5246965", "0.52456164", "0.5240246", "0.52388126", "0.5235801", "0.5178887", "0.5173281", "0.5156179", "0.5151849", "0.5135107", "0.5122915", "0.5116508", "0.51000804", "0.50928724", "0.5092221", "0.50900704", "0.5079035", "0.50728846", "0.50723225", "0.50713515", "0.50643927", "0.5056056", "0.503726", "0.50258523", "0.50211984", "0.5019789", "0.5011139", "0.5008063", "0.49995556", "0.49933797", "0.49881858", "0.49793014", "0.49466884", "0.49287766", "0.49260136", "0.49236655", "0.49213666", "0.49203503", "0.49202093", "0.49139193", "0.48998898", "0.4874058", "0.48732498", "0.4859822", "0.4858155", "0.48579794", "0.48495093", "0.48420677", "0.4828709", "0.48168266", "0.4812351", "0.48116267", "0.4805899", "0.4805615", "0.479957", "0.47942042", "0.4783767", "0.47737235", "0.47729668", "0.4764117", "0.47624475", "0.4762254" ]
0.7273526
0
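The document snippet in the record above relies on NumPy fancy-index assignment: writing np.arange(p.size) into positions p puts each index at the slot named by its value, which is exactly the inverse permutation. A minimal, self-contained sketch of that behavior (assuming only NumPy; the example permutation is illustrative, not taken from the dataset):

import numpy as np

def invert_permutation(p):
    # s[p[i]] = i for every i, so s[i] ends up holding the index of i in p
    s = np.empty_like(p)
    s[p] = np.arange(p.size)
    return s

p = np.array([2, 0, 3, 1])
s = invert_permutation(p)
print(s)                                  # [1 3 0 2]
assert (p[s] == np.arange(p.size)).all()  # composing p with its inverse restores order
assert (s[p] == np.arange(p.size)).all()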
Initialize a HOOMD device given the parsed arguments.
def make_hoomd_device(args): if args.device == 'CPU': device = hoomd.device.CPU() elif args.device == 'GPU': device = hoomd.device.GPU() else: raise ValueError(f'Invalid device {args.device}.') if not args.verbose: device.notice_level = 0 return device
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize(self):\n self.ha_url = self.args.get(\"ha_url\", None)\n self.use_current_brightness = self.args.get(\"use_current_brightness\", False)\n self.condition = self.args.get(\"condition\")\n self.lights = self.args[\"lights\"]\n self.listen_state(self.change_lights_color, self.args[\"media_player\"], attribute = self.args.get(\"photo_attribute\", \"entity_picture\"))", "def init_argument_parser(modules, device=None):\n parser = ArgumentParser()\n if device:\n parser.add_argument('--help-device', action='store_true',\n help='Print help for arguments specific to device')\n modules.append('libregice')\n init_modules_args(device, parser, modules)\n return parser", "def __init__(self, device_handle):\n\n self.device_handle = device_handle", "def init(*, args: List[str]) -> None:\n logs.show_presentation()\n execute.parse_args(args=args)", "def _Parse():\n prog = sys.argv[0]\n example_usage = ('Example:\\n' +\n ' python %s keyboard 00:11:22:33:44:55\\n' % prog +\n ' python %s mouse 00:11:22:33:44:55\\n'% prog)\n parser = argparse.ArgumentParser(\n description='Emulate a HID device.\\n' + example_usage,\n formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument('device',\n choices=['keyboard', 'mouse'],\n help='the device type to emulate')\n parser.add_argument('remote_host_address',\n help='the remote host address')\n parser.add_argument('-c', '--chars_to_send',\n default='echo hello world',\n help='characters to send to the remote host')\n args = parser.parse_args()\n\n if len(args.remote_host_address.replace(':', '')) != 12:\n print '\"%s\" is not a valid bluetooth address.' % args.remote_host_address\n exit(1)\n\n print ('Emulate a %s and connect to remote host at %s' %\n (args.device, args.remote_host_address))\n return args", "def initialize():\n\n parser = build_arg_parser()\n par = parser.parse_known_args()[0]\n\n # Main arguments.\n set('run_mode', par.run_mode)\n set('input_files', par.image)\n\n # Sub-parser specific arguments.\n if par.run_mode == 'train':\n\n set('batch_size', par.batch_size)\n set('drop', par.drop)\n set('epochs', par.epochs)\n set('model', par.model)\n set('level', par.level)\n set('vfrac', par.vfrac)\n set('data_augm', par.data_augm)\n set('summary', par.summary)\n set('outdir', par.outdir)\n # Parameters associated with super-resolution. \n set('super_resolution', par.super_resolution)\n set('generator', par.generator)\n set('discriminator', par.discriminator)\n\n elif par.run_mode == 'predict':\n\n set('tile_edge', par.edge)\n set('model', par.model)\n set('save_conv2d_kernels', par.save_conv2d_kernels) \n set('save_conv2d_outputs', par.save_conv2d_outputs) \n set('colormap', par.colormap)\n # Parameters associated with super-resolution. 
\n set('super_resolution', par.super_resolution)\n set('generator', par.generator)\n\n elif par.run_mode == 'diagnose': \n \n set('model', par.model) \n \n else:\n \n pass", "def parse_args():\n global default_device\n\n parser = argparse.ArgumentParser(description = 'Initialize OATH token',\n add_help=True,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument('-D', '--device',\n dest='device',\n default=default_device,\n required=False,\n help='YubiHSM device',\n )\n parser.add_argument('--debug',\n dest='debug',\n action='store_true', default=False,\n help='Enable debug operation',\n )\n\n parser.add_argument('--key-handle',\n dest='key_handle',\n required=True,\n help='Key handle to create AEAD',\n metavar='HANDLE',\n )\n parser.add_argument('--oath-k',\n dest='oath_k',\n required=False,\n help='The secret key of the token, hex encoded',\n metavar='HEXSTR',\n )\n\n args = parser.parse_args()\n return args", "def init_arg_parser(args):\n arg_parser = argparse.ArgumentParser(\n description='Control node for the InMoov robot head. Receives movement commands and calculates trajectory.')\n\n arg_parser.add_argument('-p', '--showplot',\n action='store_true',\n dest='showplot',\n help='Triggers display of plot for calculated trajectory')\n\n arg_parser.add_argument('--version', action='version', version='%(prog)s 0.1a')\n\n args = arg_parser.parse_args(args)\n\n return args", "def __init__(self, device_dict):\n diff = set(device_dict.keys()) - set(YAMLKeyword.__dict__.keys())\n if len(diff) > 0:\n six.print_('Wrong key detected:')\n six.print_(diff)\n raise KeyError(str(diff))\n self.__dict__.update(device_dict)\n if self.system == SystemType.android:\n pass\n elif self.system == SystemType.arm_linux:\n try:\n sh.ssh('-q', '%s@%s' % (self.username, self.address),\n 'exit')\n except sh.ErrorReturnCode as e:\n six.print_('device connect failed, '\n 'please check your authentication',\n file=sys.stderr)\n raise e", "def __init__(self,device=None,port=0):\n self.device= Service.initDevice(device)\n self.adbCmd= r'adb -s %s '%self.device\n self.port = port\n if self.port == 0:\n self.port = utils.free_port()", "def __init__(self, device):\n self._unique_id = device\n self._device = AehW4a1(device)\n self._fan_modes = FAN_MODES\n self._swing_modes = SWING_MODES\n self._preset_modes = PRESET_MODES\n self._attr_available = False\n self._on = None\n self._current_temperature = None\n self._target_temperature = None\n self._attr_hvac_mode = None\n self._fan_mode = None\n self._swing_mode = None\n self._preset_mode = None\n self._previous_state = None", "def __init__(self, **device_identifiers):\n\n # Connect to the first available device.\n try:\n self.device = usb.core.find(**device_identifiers)\n except usb.core.USBError as e:\n # On some platforms, providing identifiers that don't match with any\n # real device produces a USBError/Pipe Error. 
We'll convert it into a\n # DeviceNotFoundError.\n if e.errno == LIBUSB_PIPE_ERROR:\n raise DeviceNotFoundError()\n else:\n raise e\n\n # If we couldn't find a board, bail out early.\n if self.device is None:\n raise DeviceNotFoundError()\n\n # For now, supported boards provide a single configuration, so we\n # can accept the first configuration provided.\n self.device.set_configuration()\n\n # Run the parent initialization.\n super(USBCommsBackend, self).__init__(**device_identifiers)", "def __init__(self):\n self.hw = dev_hwinfo.device()\n self.ethKey=\"Ethernet\"\n self.ethAllInterfaceName=[]\n dir_path = os.path.dirname(os.path.realpath(__file__))\n self.myDefine = init_define.main()\n self.mPlatform=self.hw.getPlatform()", "def init(self):\r\n self._parse_options(self._force_args)\r\n self._maybe_daemonize()\r\n self._setup_modules()\r\n self._state = self.INITIALIZED", "def __init__(self, name, host):\n\n self._device = OppleLightDevice(host)\n\n self._name = name\n self._is_on = None\n self._brightness = None\n self._color_temp = None", "def init(self, *args):\n return self.cmd('init', *args)", "def __init__(self, args, data_path, data_dir, device, log, x_shape):\r\n self._args = args\r\n self._data_path = data_path\r\n self._data_dir = data_dir\r\n self._device = device\r\n self._x_shape = x_shape\r\n self._log = log", "def __init__(self, temperature=None, daba=True, *args, **kwargs):\n super(DATA, self).__init__(*args, **kwargs)\n self.daba = daba\n self.temperature = temperature\n self.argv = None\n self.config = None", "def initialize(self,*args,**kwargs):\n self.__instrumentID = c_uint32(0) \n self.__numInstruments = c_uint32()\n self.__nbrOfChannels = c_uint32()\n self.__nbrADCBits = c_uint32()\n self.__temperature = c_int32()\n self.__time_us = c_double()\n\n self.loadDLLs(**kwargs) # Load the different DLLs or DLL based modules\n self.reinit() # init or reinit the board\n self.createDictAndGlobals() # create dictionaries and global variables\n self.nbrOfChannels=int(self.__nbrOfChannels.value) # duplicate self.nbrOfChannels in a Python type variable \n self.getInitialConfig()", "def init_device(platform=\"Android\", uuid=None, **kwargs):\n cls = import_device_cls(platform)\n dev = cls(uuid, **kwargs)\n # Add device instance in G and set as current device.\n G.add_device(dev)\n return dev", "def set_parameters(self, args):\n self.args = args\n\n if args.testing:\n self.delay_close()\n\n if args.source == \"simulation\":\n log.info(\"Create simulated spectra device\")\n self.dev = simulation.SimulatedSpectraDevice()\n\n elif args.source == \"sled\":\n log.info(\"Create single sled cobra\")\n self.dev = simulation.SimulatedCobraSLED()\n\n elif args.source == \"cobra\":\n log.info(\"Create DALSA cobra device\")\n #self.dev = devices.DalsaCobraDevice()\n self.dev = DALSA.Cobra()\n\n elif args.source == \"opto\":\n log.info(\"Create OPTO sensor cobra device\")\n self.dev = DALSA.OPTOCobra()\n\n elif args.source == \"basler\":\n log.info(\"Create DALSA basler device\")\n #self.dev = devices.DalsaBaslerDevice()\n self.dev = DALSA.BaslerSprint4K()\n\n self.dev.setup_pipe()\n self.setup_pipe_timer()", "def initialise(self, args, environ):", "def initialize(self, args):\n\t\tpass", "def _init_system(*args):\n __set_time_elements(args[0], args[1])\n __set_control_elements(args[0], args[2], args[3])\n __set_sensor_elements(args[0], args[4], args[5], args[6], args[7])", "def __init__(self, root, io):\n parts.hand.Hand.__init__(self, root=root, io=io)\n\n dxl_motors = OrderedDict({\n name: 
dict(conf)\n for name, conf in self.dxl_motors.items()\n })\n\n self.attach_dxl_motors(dxl_motors)\n\n \"\"\"\n self._load_sensor = self.io.find_module('force_gripper')\n self._load_sensor.offset = 4\n self._load_sensor.scale = 10000\n \"\"\"", "def __init__(self, hass: HomeAssistantType, entry: ConfigEntry) -> None:\n self.hass = hass\n self.entry = entry\n self.entry_id = entry.entry_id\n self.unique_id = entry.unique_id\n self._host = entry.data.get(CONF_HOST)\n self._port = entry.data.get(CONF_PORT)\n self._ssl = entry.data.get(CONF_SSL)\n self._username = entry.data.get(CONF_USERNAME)\n self._password = entry.data[CONF_PASSWORD]\n\n self._info = None\n self.model = None\n self.device_name = None\n self.firmware_version = None\n\n self._method_version = 1\n consider_home_int = entry.options.get(\n CONF_CONSIDER_HOME, DEFAULT_CONSIDER_HOME.total_seconds()\n )\n self._consider_home = timedelta(seconds=consider_home_int)\n\n self._api: Netgear = None\n self._attrs = {}\n\n self.devices = {}", "def __init__(self):\n self.server_name = 'Binary Light Device'\n self.device = None", "def init(self, args, **kwargs):\n # Retrieve configuration file and directory or set defaults.\n conf_file = os.path.expanduser(\n args._get('conf_file', kwargs.pop('conf_file', DEFAULT_CONF_FILE)))\n conf_dir = os.path.expanduser(\n args._get('conf_dir', kwargs.pop('conf_dir', DEFAULT_CONF_DIR)))\n commands = [value for (arg, value) in sorted(args) if arg.startswith('command')]\n\n # Load main configuration file.\n if os.path.exists(conf_file):\n self.load_cmd_file(conf_file)\n\n # Load intermediary configuration files.\n if os.path.isdir(conf_dir):\n self.load_dir(conf_dir, clg.config, commands)", "def __init__(self, manager, device_config, log_file_name, log_directory):\n super().__init__(\n manager,\n device_config,\n log_file_name=log_file_name,\n log_directory=log_directory)\n self._commands.update(COMMANDS)\n self._regexes.update(REGEXES)\n self._timeouts.update(TIMEOUTS)\n self._serial_port = None", "def init_parser(parser):\n parser.add_argument(\"--device\", \"-d\",\n help=\"Device to record video from\",\n type=types.connected_android_device,\n default=defaults.connected_android_device()).completer = completion.android_devices\n parser.add_argument(\"--bitrate\", \"-b\",\n help=\"Video bit rate, by default 8000000 (6Mbps)\",\n type=int,\n default=8000000)\n parser.add_argument(\"--timeout\", \"-t\",\n help=\"Maximum video duration, seconds (shouldn't exceed 180)\",\n type=types.adb_video_limit,\n default=180)\n parser.add_argument(\"--compress\", \"-c\",\n help=\"Compress video after recording or not, by default True\",\n type=bool,\n default=True).completer = completion.truefalse", "def __init__(self, device):\n self.logger = logging.getLogger('ADB')\n self.device = device\n self.cmd_prefix = ['adb']\n\n r = subprocess.check_output(['adb', 'devices']).split('\\n')\n if not r[0].startswith(\"List of devices attached\"):\n raise ADBException()\n\n online_devices = []\n for line in r[1:]:\n if not line:\n continue\n segments = line.split(\"\\t\")\n if len(segments) != 2:\n continue\n if segments[1].strip() == \"device\":\n online_devices.append(segments[0])\n\n if not online_devices:\n raise ADBException()\n\n if device.serial:\n if device.serial not in online_devices:\n raise ADBException()\n else:\n device.serial = online_devices[0]\n\n self.cmd_prefix.append(\"-s\")\n self.cmd_prefix.append(device.serial)\n\n if self.check_connectivity():\n self.logger.info(\"adb successfully initiated, the device is 
%s\" % device.serial)\n else:\n raise ADBException()", "def __init__(self, device: Dict[str, Union[str, int, float]]) -> None:\n super().__init__()\n log_int.debug(f\"Initializing serial device for {device['name']} :: Port: {device['port']} - Baud rate: \"\n f\"{device['baudrate']} - Timeout: {device['timeout']} - RTS/CTS: {bool(device['flowcontrol'])}\")\n self.device = serial.Serial(port=device['port'], baudrate=device['baudrate'],\n timeout=device['timeout'], rtscts=device['flowcontrol'])", "def initialize():\n\n global cmdarg\n # Open syslog for error message tracking\n syslog.openlog(\"munin-chrony\", 0, syslog.LOG_DAEMON)\n\n # Try to get the command-line argument, if there is one (usually either\n # 'config' or nothing)\n try:\n cmdarg = sys.argv[1]\n except IndexError:\n # It's not actually an error if this is out of range -- it just means\n # there wasn't an argument, so don't run in config mode\n cmdarg = \"\"", "def __init__(self, arlo, device, sensor_type):\n\n sensor_details = SENSOR_TYPES[sensor_type]\n\n if device is None:\n self._name = sensor_details[0]\n self._unique_id = sensor_type\n self._device = arlo\n else:\n self._name = \"{0} {1}\".format(sensor_details[0], device.name)\n self._unique_id = (\n \"{0}_{1}\".format(sensor_details[0], device.entity_id)\n .lower()\n .replace(\" \", \"_\")\n )\n self._device = device\n\n self._sensor_type = sensor_type\n self._icon = \"mdi:{}\".format(sensor_details[2])\n self._state = None\n self._attr = sensor_details[3]\n _LOGGER.info(\"ArloSensor: %s created\", self._name)", "def initialize(self, args):\n # You must parse model_config. JSON string is not parsed here\n self.model_config = json.loads(args['model_config'])\n print(\"model_config:\", self.model_config)\n\n self.input_names = []\n for input_config in self.model_config[\"input\"]:\n self.input_names.append(input_config[\"name\"])\n print(\"postprocess input names:\", self.input_names)\n\n self.output_names = []\n self.output_dtype = []\n for output_config in self.model_config[\"output\"]:\n self.output_names.append(output_config[\"name\"])\n dtype = pb_utils.triton_string_to_numpy(output_config[\"data_type\"])\n self.output_dtype.append(dtype)\n print(\"postprocess output names:\", self.output_names)\n self.postprocessor = fd.vision.ocr.DBDetectorPostprocessor()\n self.cls_preprocessor = fd.vision.ocr.ClassifierPreprocessor()\n self.rec_preprocessor = fd.vision.ocr.RecognizerPreprocessor()\n self.cls_threshold = 0.9", "def __init__(self, hdw=['Soundcard'], devicename='dev1'):\n self.debugFlag = False\n self.task = None # NI Task\n self.required_hardware = hdw # Require specific hardware \n self.hardware = [] # list of hardware actually found on this system\n self.find_hardware(device_info={'devicename': devicename}) # population the self.hardware list", "def setup_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-u\", \"--url\", dest='url', required=True,\n help=\"Falkonry Edge URL\")\n parser.add_argument(\"-i\", \"--input_file\", dest='input', required=True,\n help=\"Input data file to feed into Falkonry Edge Analyzer\")\n parser.add_argument(\"-o\", \"--output_file\", dest='output', required=True,\n help=\"File name to write Falkonry Edge Analyzer output\")\n parser.add_argument(\"-t\", \"--time_column\", dest='time', type=int, required=True,\n help=\"Time column index starting with 0\")\n parser.add_argument(\"-z\", \"--time_zone\", dest='zone', required=True,\n help=\"Time zone\")\n parser.add_argument(\"-f\", \"--time_format\", dest='format', 
required=True,\n help=\"Timestamp format\")\n parser.add_argument(\"-e\", \"--entity_column\", dest='entity', type=int,\n help=\"Entity column index starting with 0\")\n parser.add_argument(\"-b\", \"--batch_column\", dest='batch', type=int,\n help=\"Batch column index starting with 0\")\n parser.add_argument(\"-r\", \"--input_feed_rate\", dest='rate', type=int, default=1000,\n help=\"Number of records to send to edge per second.\")\n\n return parser", "def __init__(self, tado, device_name, device_id, device_variable):\n self._tado = tado\n\n self.device_name = device_name\n self.device_id = device_id\n self.device_variable = device_variable\n\n self._unique_id = f\"{device_variable} {device_id} {tado.device_id}\"\n\n self._state = None\n self._state_attributes = None\n self._tado_device_data = None\n self._undo_dispatcher = None", "def _post_parser_init(self, args):\n self.host = args.host if args.host else 'localhost'\n self.protocol = args.protocol\n self.id = args.id\n self.server = DerivaServer(self.protocol,\n args.host,\n credentials=DerivaCatalogCLI._get_credential(self.host,\n token=args.token,\n oauth2_token=args.oauth2_token))", "def __init__(\n self,\n netatmo_device: NetatmoDevice,\n ) -> None:\n super().__init__(netatmo_device.data_handler)\n\n self._switch = cast(NaModules.Switch, netatmo_device.device)\n\n self._id = self._switch.entity_id\n self._attr_name = self._device_name = self._switch.name\n self._model = self._switch.device_type\n self._config_url = CONF_URL_CONTROL\n\n self._home_id = self._switch.home.entity_id\n\n self._signal_name = f\"{HOME}-{self._home_id}\"\n self._publishers.extend(\n [\n {\n \"name\": HOME,\n \"home_id\": self._home_id,\n SIGNAL_NAME: self._signal_name,\n },\n ]\n )\n self._attr_unique_id = f\"{self._id}-{self._model}\"\n self._attr_is_on = self._switch.on", "def __init__(self, **kwds):\n self.system=self.username=self.password=\"\"\n if kwds.has_key(\"system\"):\n self.system=kwds[\"system\"]\n if kwds.has_key(\"username\"):\n self.username=kwds[\"username\"]\n if kwds.has_key(\"password\"):\n self.password=kwds[\"password\"]\n if kwds.has_key(\"element\"):\n self.fromElement(kwds[\"element\"])", "def doInitializeDevice(self):\n try:\n\n if self.serialNumber == \"*\" or self.serialNumber == \".*\":\n self.device = OISpectrometer.matchUniqueUSBDevice( idProduct=self.idProduct)\n else:\n self.device = OISpectrometer.matchUniqueUSBDevice( idProduct=self.idProduct,\n serialNumber=self.serialNumber)\n\n \"\"\" Below are all the USB protocol details. This requires reading\n the USB documentation, the Spectrometer documentation and many other \n details. What follows may sound like gibberish.\n\n There is a single USB Configuration (default) with a single USB Interface \n without alternate settings, so we can use (0,0).\n \"\"\"\n self.device.set_configuration()\n self.configuration = self.device.get_active_configuration()\n self.interface = self.configuration[(0,0)]\n\n \"\"\"\n We are working on the reasonable assumption from the documentation\n that the first input and output endpoints are the main endpoints and the\n second input is the data endpoint. If that is not the case, the subclass can\n simply reassign the endpoints properly in its __init__ function. \n \"\"\"\n for endpoint in self.interface:\n \"\"\" The endpoint address has the 8th bit set to 1 when it is an input.\n We can check with the bitwise operator & (and) 0x80. It will be zero\n if an output and non-zero if an input. 
\"\"\"\n if endpoint.bEndpointAddress & 0x80 != 0:\n self.inputEndpoints.append(endpoint)\n else:\n self.outputEndpoints.append(endpoint)\n\n\n if len(self.inputEndpoints) >= 2 or len(self.outputEndpoints) > 0:\n \"\"\" We have at least 2 input endpoints and 1 output. We assign the\n endpoints according to the documentation, otherwise\n the subclass will need to assign them.\"\"\"\n self.epCommandOut = self.outputEndpoints[self.epCommandOutIdx]\n self.epMainIn = self.inputEndpoints[self.epMainInIdx]\n self.epSecondaryIn = self.inputEndpoints[self.epSecondaryInIdx]\n self.epParameters = self.inputEndpoints[self.epParametersIdx]\n self.epStatus = self.inputEndpoints[self.epStatusIdx]\n\n self.flushEndpoints()\n self.sendCommand(b'0x01')\n time.sleep(0.1)\n self.getCalibration()\n except Exception as err:\n raise UnableToInitialize(\"Error when initializing device: {0}\".format(err))", "async def initialize(self, hw_init=False, init_speed: str = \"200 sec / stroke\"):\n await self.pump_io.initialize()\n # Test connectivity by querying the pump's firmware version\n fw_cmd = Protocol1Command(command=\"U\", target_pump_num=self.address)\n self.metadata.version = await self.pump_io.write_and_read_reply_async(fw_cmd)\n logger.info(\n f\"Connected to Hamilton ML600 {self.name} - FW version: {self.metadata.version}!\"\n )\n\n if hw_init:\n await self.initialize_pump(speed=ureg.Quantity(init_speed))", "def setup_argparser():\n parser = argparse.ArgumentParser(\n description='Waldo client.')\n\n parser.add_argument('--server', dest='server',\n default=os.environ.get('WALDO_SERVER',\n consts.PRODUCTION))\n\n # Token/username/password can be passed in 3 ways:\n # Command-line, environment var, keyring\n parser.add_argument('--token', dest='token',\n default=os.environ.get(\n 'WALDO_TOKEN',\n keyring.get_password('waldoclient', 'token')),\n help='Racker auth token')\n\n parser.add_argument('--username', dest='username',\n default=os.environ.get(\n 'WALDO_USERNAME',\n keyring.get_password('waldoclient', 'username')),\n help='Racker SSO username. Securely store this '\n 'value in your keyring by running: '\n '`keyring set waldoclient username`.')\n\n parser.add_argument('--password', dest='password',\n default=os.environ.get(\n 'WALDO_PASSWORD',\n keyring.get_password('waldoclient', 'password')),\n help='Racker SSO password. 
Securely store this '\n 'value in your keyring by running: '\n '`keyring set waldoclient password`.')\n\n verbose = parser.add_mutually_exclusive_group()\n verbose.add_argument('--debug', dest='debug', action='store_true',\n default=False, help='output debug messages')\n verbose.add_argument('--quiet', dest='debug', action='store_true',\n help='suppress output debug messages')\n return parser", "def setup_parser(self, parser, args):\r\n\r\n pass", "def _init_parser():\n\t\n\t_parser = argparse.ArgumentParser()\n\t_parser.add_argument(\"--pull\", help=\"pull scripts from UR3\", action=\"store_true\")\n\t_parser.add_argument(\"--create\", help=\"create data base from script files\", action=\"store_true\")\n\t_parser.add_argument(\"--clear\", help=\"clear all data base\", action=\"store_true\")\n\treturn _parser", "def setup_parser():\n parser = HelpfulParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('infile', type=str, help=\"input data file\")\n\n parser.add_argument('-u', '--usage', action=\"help\",\n help=\"show this help message and exit\")\n parser.add_argument('-h', '--host', metavar='HOST', type=str,\n default='localhost', help='Server hostname')\n parser.add_argument('-p', '--port', metavar='PORT', type=int,\n default='3000', help='Server port')\n parser.add_argument('-U', '--user', metavar='USER', type=str,\n default=None, help='Username')\n parser.add_argument('-P', '--passwd', metavar='PW', type=str,\n default=None, help='Password')\n parser.add_argument('-n', '--nspace', metavar='NS', type=str,\n default='test', help='Namespace')\n parser.add_argument('-s', '--set', metavar='SET', type=str,\n default='osm', help='Set name')\n return parser", "def __init__(self):\n nvmlInit()\n n_devices = nvmlDeviceGetCount()\n devices_handlers_list = [nvmlDeviceGetHandleByIndex(i) for i in range(n_devices)]\n\n self.devices = {\n '{}-{}'.format(NvmlHandler.exec_nvml_function(nvmlDeviceGetName, device).decode('ascii'), i): device\n for i, device in enumerate(devices_handlers_list)\n }", "def __init__(self, device):\n self.device = device\n self.device.get_active_configuration()", "def __init__(self, *argv, **kwargs):\n self.refs = {}\n self.ref0s = {}\n self.defect_refs = {}\n\n self.initialize(*argv, **kwargs)", "def setup_parser(parser):\n\n parser.add_argument('input', metavar='INPUT', type=str, nargs='*',\n help='read tracking result from file named INPUT')\n parser.add_argument('-x', nargs = 2, type=int, help='x-range')\n parser.add_argument('-y', nargs = 2, type=int, help='y-range')\n parser.add_argument('-t', nargs = 2, type=int, help='t-range')\n parser.add_argument('--count', type = int, default = 0, \n help = 'target number of observation per chunk')\n parser.add_argument('--apron', type = int, default = 4, \n help = 'size of the chunk overlap')", "def __init__(self, ds=None, **kwargs) :\n self._name = self.__class__.__name__\n print('In %s.__init__' % self._name)\n\n HexDataIO.__init__(self, **kwargs)\n\n DIO = self\n if ds is None :\n DIO.open_input_data(self.DSNAME, **kwargs)\n else :\n DIO.use_psana_dataset(ds, pbits=0o377 if self.VERBOSE else 0)\n \n self._init_calib_and_sorter()\n\n self.t0_sec = self.t1_sec = time()", "def open(self):\n # Move all of the connection arguments into connect_args\n connect_args = {}\n\n # check for mode\n if self.get_option('port') is None:\n if self.get_option('mode') == 'telnet':\n connect_args['port'] = 23\n elif self.get_option('mode') == 'serial':\n connect_args['port'] = 
'/dev/ttyUSB0'\n else:\n connect_args['port'] = 830\n else:\n connect_args['port'] = self.get_option('port')\n\n if (self.get_option('mode') == 'telnet' or\n self.get_option('mode') == 'serial'):\n if self.get_option('baud') is None:\n # Default baud if serial or telnet mode\n connect_args['baud'] = 9600\n if self.get_option('attempts') is None:\n # Default attempts if serial or telnet mode\n connect_args['attempts'] = 10\n\n connect_args['host'] = self.get_option('host')\n # connect_args['port'] = self.get_option('port')\n connect_args['user'] = self.get_option('remote_user')\n connect_args['passwd'] = self.get_option('password')\n connect_args['ssh_private_key_file'] = self.get_option('private_key_file')\n connect_args['ssh_config'] = self.get_option('pyez_ssh_config')\n connect_args['timeout'] = self.get_option('persistent_connect_timeout')\n try:\n log_connect_args = dict(connect_args)\n log_connect_args[\"passwd\"] = \"NOT_LOGGING_PARAMETER\"\n\n self.queue_message(\"vvvv\", \"Creating device parameters: %s\" % log_connect_args)\n timeout = connect_args.pop(\"timeout\")\n self.dev = jnpr.junos.device.Device(**connect_args)\n self.queue_message(\"vvvv\", \"Opening device.\")\n self.dev.open()\n self.queue_message(\"vvvv\", \"Device opened.\")\n\n self.dev.timeout = self.get_option('persistent_command_timeout')\n self.queue_message(\"vvvv\", \"Setting default device timeout to %d.\" % timeout)\n # Exceptions raised by close() or open() are all sub-classes of\n # ConnectError, so this should catch all connection-related exceptions\n # raised from PyEZ.\n except pyez_exception.ConnectError as ex:\n raise AnsibleError(\"Unable to make a PyEZ connection: %s\" % (str(ex)))", "def __init__(self, device_configuration, command_parser):\n\n # Call the VirtualDriver constructor\n super(MXL_Balloon_Tracker,self).__init__(device_configuration, command_parser)\n\n # Initialize the driver's command handler\n self._command_handler = BalloonHandler(self)\n\n # Create the Direct Downlink APRS tracking service\n self._aprs_service = Direct_Downlink_APRS_Service('direct_downlink_aprs_service', 'tracker', device_configuration)\n\n # Setup tracker attributes\n self.last_known_location = None", "def __init__(self, device, name, xiaomi_hub):\n self._sid = device['sid']\n self._name = '{}_{}'.format(name, self._sid)\n self.parse_data(device['data'])\n\n self.xiaomi_hub = xiaomi_hub\n xiaomi_hub.XIAOMI_HA_DEVICES[self._sid].append(self)", "def __init__(self, device: Thermostat, gateway: DeconzGateway) -> None:\n super().__init__(device, gateway)\n\n self._attr_hvac_modes = [\n HVACMode.HEAT,\n HVACMode.OFF,\n ]\n if device.mode:\n self._attr_hvac_modes.append(HVACMode.AUTO)\n\n if \"coolsetpoint\" in device.raw[\"config\"]:\n self._attr_hvac_modes.append(HVACMode.COOL)\n\n self._deconz_to_hvac_mode = {\n HVAC_MODE_TO_DECONZ[item]: item for item in self._attr_hvac_modes\n }\n\n self._attr_supported_features = ClimateEntityFeature.TARGET_TEMPERATURE\n\n if device.fan_mode:\n self._attr_supported_features |= ClimateEntityFeature.FAN_MODE\n self._attr_fan_modes = list(FAN_MODE_TO_DECONZ)\n\n if device.preset:\n self._attr_supported_features |= ClimateEntityFeature.PRESET_MODE\n self._attr_preset_modes = list(PRESET_MODE_TO_DECONZ)", "def __init__(self, *args, **kwargs):\n self.__is_connected__ = False\n self.logger = kwargs.get('logger',None)\n if ( self.logger is None ):\n # Get an instance of a logger\n console = logging.StreamHandler()\n formatter = logging.Formatter('%(asctime)s: %(levelname)-8s 
%(message)s',\"%Y-%m-%d %H:%M:%S\")\n console.setFormatter(formatter)\n logging.getLogger('').addHandler(console)\n self.logger = logging.getLogger('')\n self.logger.setLevel(logging.INFO)\n # initial log entry\n self.logger.debug(\"%s: %s version [%s]\" % (self.__class__.__name__, inspect.getfile(inspect.currentframe()),__version__))\n # initialize variables - so all are listed here for convenience\n self.dict_config = {} # dictionary, see cdh_manager.cfg example\n self.__cm_cdh__ = None\n self.__boto_ec2__ = None\n self.data = DataObjectSample(logger=self.logger)", "def init(*args):\n global dataset\n dataset = args[0]", "def initialize_command(parser):\n parser.add_argument('--config', dest='config',\n help='the pyramid config file')\n args = parser.parse_args()\n assert args.config,\\\n 'You must have a --config config.ini, or we can\\'t run the command'\n # load the pyramid config env\n env = bootstrap(args.config)\n # load the database\n initialize_sql(\n env['registry'].settings.get('sqlalchemy.url'))\n # return the args and the pyramid env for later use\n return args, env", "def __init__(self, device_mode, loop):\n self.loop = loop\n self.device_mode = device_mode\n if self.device_mode == \"stationary\":\n self.openface = OpenFaceInstance()\n self.openface.startProcess()\n self.stationary_eye_tracker = StationaryEyeTracker()\n elif self.device_mode == \"mobile\":\n self.openpose = OpenPoseInstance()\n self.openpose.startProcess()\n self.mobile_eye_tracker = MobileEyeTracker()\n self.mobile_eye_tracker.calibrate()\n\n self.wristband = Wristband(self.loop)", "def __init__(self, config, weather=None, **kwargs):\n\n super().__init__(weather, **kwargs)\n\n config = self.initialize_library(config, **kwargs)\n self.config = self.validate_config(config)\n\n self.initialize_port()\n self.setup_simulation(**kwargs)", "def __init__(self, hass, entry, device: aioshelly.Device):\n super().__init__(\n hass,\n _LOGGER,\n name=device.settings[\"name\"] or device.settings[\"device\"][\"hostname\"],\n update_interval=timedelta(seconds=5),\n )\n self.hass = hass\n self.entry = entry\n self.device = device", "def init():\n try:\n h = hid.device()\n h.open(USB_VID, USB_PID)\n h.set_nonblocking(1)\n except IOError as ex:\n print('ERROR: could not establish connection to device')\n print(ex)\n return None\n return h", "def __init__(self, device_id, interior_sensor=\"null\", exterior_sensor=\"null\"):\r\n self.device_id = device_id\r\n self.interior_sensor = interior_sensor\r\n self.exterior_sensor = exterior_sensor", "def __init__(self, env, name):\n NetworkDevice.__init__(self, env, name, 1)\n self.env.process(self.listen_for_messages(self.echo))", "def parseArguments(self):\n # ----------------------------------\n # Load default settings\n self.configfile = DEFAULT_CONFIG_FILE\n self.baudrate = DEFAULT_BAUDRATE\n self.logfile = DEFAULT_LOG_FILE\n self.timeout = DEFAULT_TIMEOUT\n self.loglevel = DEFAULT_LOG_LEVEL\n self.device = False\n\n # ----------------------------------\n # Parse arguments\n args_raw = ''\n if (len(sys.argv) > 1):\n args_raw = string.join(sys.argv)\n self.args_obj = None\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=\"\"\"\\\nConnects to a serial device and sends and receives data.\n\"\"\",\n usage='serial_logger_gui.py -h | [-l log_file] [--loglevel=LEVEL] [-c config_file] [-t timeout] [-b baud_rate] [device_path] ',\n add_help=False\n )\n parser.add_argument('device', nargs='?',\n help='device path or id of the serial 
port')\n parser.add_argument('-b', '--baud', dest='baud_rate', nargs=1, type=int,\n help='baud rate for the serial connection (default: {0!s})'.format(self.baudrate))\n parser.add_argument('-c','--config', dest='config_file', nargs=1,\n help='config file with default values (default: {0})'.format(self.configfile))\n parser.add_argument('-t', '--timeout', dest='timeout', nargs=1, type=int,\n help='serial connection timeout in seconds (default: {0!s})'.format(self.timeout))\n parser.add_argument('-l','--log', dest='log_file', nargs=1,\n help='log file to record session to (default: {0})'.format(self.logfile))\n parser.add_argument('--loglevel', dest='log_level', action='store', default=self.loglevel,\n help='sets the logging level (default: %(default)s)', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'])\n parser.add_argument('-h', '--help', action='store_true', dest='want_help',\n help='show this help message and exit')\n\n # Actually parse the arguments given\n try:\n self.args_obj = parser.parse_args()\n #pp.pprint(args_obj)\n if (self.args_obj.want_help):\n parser.print_help()\n self.quit()\n if (self.args_obj.config_file):\n self.configfile = self.args_obj.config_file\n if (self.args_obj.log_file):\n self.logfile = self.args_obj.log_file\n if (self.args_obj.baud_rate):\n self.baudrate = self.args_obj.baud_rate\n if (self.args_obj.device):\n self.device = self.args_obj.device\n if (self.args_obj.timeout):\n self.timeout = self.args_obj.timeout\n\n except Exception, ex:\n # Handle exception here\n raise\n self.quit()", "def __init__(self, pidevice, **kwargs):\n debug('create an instance of ControllerStartup(kwargs=%s)', itemstostr(kwargs))\n\n if not isdeviceavailable([GCS2Commands, GCS21Commands], pidevice):\n raise TypeError('Type %s of pidevice is not supported!' 
% type(pidevice).__name__)\n\n self.pidevice = pidevice\n self._stages = None\n self._refmodes = None\n self._servo = None\n self._axesnames = None\n self._kwargs = kwargs\n self._databuf = {'servobuf': {}, 'cstdone': []}\n self.prop = {\n 'devname': self.pidevice.devname, 'skipcst': False, 'forcecst': False, 'skipsai': False,\n 'forcesai': False, 'showlog': False, 'skipini': False, 'skiponl': False, 'skipeax': False,\n 'skipref': False, 'forceref': False, 'skipfph': False,\n }", "def __init__(self, config=None):\n config_dict = {}\n if config:\n config_dict = json.load(config)\n\n self.android = config_dict.get(\"android\")\n self.linux = config_dict.get(\"linux\")\n self.atf = config_dict.get(\"atf\")\n self.qemu = config_dict.get(\"qemu\", \"qemu-system-aarch64\")", "def __init__(\n self,\n unique_id: str,\n device_name: str,\n target_temperature: float | None,\n unit_of_measurement: str,\n preset: str | None,\n current_temperature: float,\n fan_mode: str | None,\n target_humidity: int | None,\n current_humidity: int | None,\n swing_mode: str | None,\n hvac_mode: HVACMode,\n hvac_action: HVACAction | None,\n aux: bool | None,\n target_temp_high: float | None,\n target_temp_low: float | None,\n hvac_modes: list[HVACMode],\n preset_modes: list[str] | None = None,\n ) -> None:\n self._unique_id = unique_id\n self._attr_supported_features = SUPPORT_FLAGS\n if target_temperature is not None:\n self._attr_supported_features |= ClimateEntityFeature.TARGET_TEMPERATURE\n if preset is not None:\n self._attr_supported_features |= ClimateEntityFeature.PRESET_MODE\n if fan_mode is not None:\n self._attr_supported_features |= ClimateEntityFeature.FAN_MODE\n if target_humidity is not None:\n self._attr_supported_features |= ClimateEntityFeature.TARGET_HUMIDITY\n if swing_mode is not None:\n self._attr_supported_features |= ClimateEntityFeature.SWING_MODE\n if aux is not None:\n self._attr_supported_features |= ClimateEntityFeature.AUX_HEAT\n if HVACMode.HEAT_COOL in hvac_modes or HVACMode.AUTO in hvac_modes:\n self._attr_supported_features |= (\n ClimateEntityFeature.TARGET_TEMPERATURE_RANGE\n )\n self._target_temperature = target_temperature\n self._target_humidity = target_humidity\n self._unit_of_measurement = unit_of_measurement\n self._preset = preset\n self._preset_modes = preset_modes\n self._current_temperature = current_temperature\n self._current_humidity = current_humidity\n self._current_fan_mode = fan_mode\n self._hvac_action = hvac_action\n self._hvac_mode = hvac_mode\n self._aux = aux\n self._current_swing_mode = swing_mode\n self._fan_modes = [\"on_low\", \"on_high\", \"auto_low\", \"auto_high\", \"off\"]\n self._hvac_modes = hvac_modes\n self._swing_modes = [\"auto\", \"1\", \"2\", \"3\", \"off\"]\n self._target_temperature_high = target_temp_high\n self._target_temperature_low = target_temp_low\n self._attr_device_info = DeviceInfo(\n identifiers={(DOMAIN, unique_id)},\n name=device_name,\n )", "def __init__(self):\n super(DaosServer.ServerStartSubCommand, self).__init__(\n \"/run/daos_server/start/*\", \"start\")\n self.port = FormattedParameter(\"-p {}\")\n self.storage = FormattedParameter(\"-s {}\")\n self.modules = FormattedParameter(\"-m {}\")\n self.targets = FormattedParameter(\"-t {}\")\n self.xshelpernr = FormattedParameter(\"-x {}\")\n self.firstcore = FormattedParameter(\"-f {}\")\n self.group = FormattedParameter(\"-g {}\")\n self.sock_dir = FormattedParameter(\"-d {}\")\n self.insecure = FormattedParameter(\"-i\", True)\n self.recreate = 
FormattedParameter(\"--recreate-superblocks\", False)", "def __init__(self, argv=[]):\n self.log_level = 'error'\n self.log_path = None\n self.name = self.__class__.__name__\n self.name_set = False\n self.verbose = False\n\n # helper to allow using string for configuration\n if argv is not None and isinstance(argv, basestring):\n argv = argv.split() # convert string to args style list\n\n # determine if a name has benn set for the instantiating windmill instance\n if argv and '--name' in argv:\n self.name_set = True\n\n self._execute_configuration(argv)", "def initialize():\n\n parser = argparse.ArgumentParser(\n description='This function takes a gene count file, a gene name, and \\\n an output file as parameters, and creates a file with the \\\n sample IDs and counts for that gene.')\n parser.add_argument('-i',\n '--data',\n type=str,\n help='The file name of the dataset.',\n required=True)\n parser.add_argument('-g',\n '--gene',\n type=str,\n help='The name of the target gene.',\n required=True)\n parser.add_argument('-o',\n '--output',\n type=str,\n help='The file name of the output file.',\n required=True)\n\n args_parse = parser.parse_args()\n\n return args_parse", "def __init__(\n self, name=None, dm_name=None, appname=None, verbose=0,\n version=__version__, base_dir=None, use_stderr=False,\n simulate=False, sudo=False, quiet=False,\n *targs, **kwargs):\n\n # Normalisation of 'name' and 'dm_name'\n if name is not None:\n name = str(name).strip()\n\n if dm_name is not None:\n dm_name = str(dm_name).strip()\n\n # One of those two parameters must be valid:\n if not name and not dm_name:\n msg = _(\n \"In minimum one parameter of 'name' and 'dm_name' \"\n \"must be given on initialisation of a %s.\") % (\n self.__class__.__name__)\n raise DmDeviceInitError(msg)\n\n super(DeviceMapperDevice, self).__init__(\n name=name,\n appname=appname,\n verbose=verbose,\n version=version,\n base_dir=base_dir,\n use_stderr=use_stderr,\n simulate=simulate,\n sudo=sudo,\n quiet=quiet,\n )\n self.initialized = False\n\n if not name:\n name = self.retr_blockdev_name(dm_name)\n self._name = name\n\n self._dm_name = dm_name\n \"\"\"\n @ivar: the devicemapper name of the device\n @type: str\n \"\"\"\n\n failed_commands = []\n\n self._dmsetup_cmd = DMSETUP_CMD\n \"\"\"\n @ivar: the dmsetup command for manipulating the devicemapper device\n @type: str\n \"\"\"\n if not os.path.exists(self.dmsetup_cmd) or not os.access(\n self.dmsetup_cmd, os.X_OK):\n self._dmsetup_cmd = self.get_command('dmsetup')\n if not self.dmsetup_cmd:\n failed_commands.append('dmsetup')\n\n self._suspended = None\n \"\"\"\n @ivar: flag that the current device is in suspended mode\n @type: bool or None\n \"\"\"\n\n self._uuid = None\n \"\"\"\n @ivar: the devicemapper UUID\n @type: str or None\n \"\"\"\n\n self._table = None\n \"\"\"\n @ivar: the device mapper table (whatever it is)\n @type: str\n \"\"\"\n\n # Some commands are missing\n if failed_commands:\n raise CommandNotFoundError(failed_commands)\n\n self.initialized = True\n if self.verbose > 3:\n LOG.debug(_(\"Initialized.\"))", "def __init__(self, meta, api):\n _LOGGER.debug(\"Initializing device: %s\", meta['name'])\n self._id = meta['id']\n self._name = meta['name']\n \"\"\"Husqvarna API stoppped returning model number, HA prior to 2021.9 defaulted to none.\"\"\"\n self._model = None\n self._state = None\n self._mower_status = None\n self._stored_timestamp = None\n self._see = None\n\n # clone already authenticated api client and\n # select automower for this instance\n 
self._api = copy.copy(api)\n self._api.select_robot(self._id)", "def __init__(self, data: dict):\n # Check if all elements are in the passed dict, else raise an Error\n if any(k not in data for k in [\"locations\", \"info\"]):\n raise DIRECTVError(\n \"DirecTV data is incomplete, cannot construct device object\"\n )\n self.update_from_dict(data)", "def command_setup(self, *args):\n def usage():\n print(self.command_setup.__doc__)\n sys.exit(1)\n\n if len(args) == 0:\n usage()\n\n try:\n # All of these (except mount_opt) map directly to the model properties\n # We allow several `mount_opt` flags and merge their values, before\n # assigning to the `mount_opts` property (which expects a list).\n fields = [\n \"id\", \"host\", \"port\", \"user\",\n \"mount_opt\", \"mount_point\",\n \"ssh_key\", \"cmd_before_mount\",\n \"auth_method\",\n ]\n opts, _ = getopt.getopt(args, \"\", [\"%s=\" % s for s in fields])\n except getopt.GetoptError as e:\n sys.stderr.write('Error: %s\\n\\n' % e)\n usage()\n\n system = SystemModel()\n mount_opts = []\n for name, value in opts:\n name = name.lstrip('-')\n if not hasattr(system, name):\n continue\n if name == 'mount_opt':\n mount_opts.append(value)\n continue\n setattr(system, name, value)\n system.mount_opts = mount_opts\n\n is_valid, errors = system.validate()\n if not is_valid:\n sys.stderr.write('Invalid data found:\\n')\n for field_name, msg in errors:\n sys.stderr.write(' - %s / %s\\n' % (field_name, msg))\n sys.stderr.write('\\n')\n usage()\n sys.exit(1)\n\n system.save(self.environment)\n print('Configuration created.')\n print('You can try mounting now: `sftpman mount %s`' % system.id)", "def __init__(self, argv):\n tool_path = str(self.__find_tool_path().resolve())\n\n try:\n result = subprocess.run(\n [tool_path],\n stdout=subprocess.PIPE,\n universal_newlines=True\n )\n\n if result.returncode != 0:\n sys.exit(result.returncode)\n\n if (\n len(argv) == 0 or\n (len(argv) == 1 and argv[0] == '-h') or\n (len(argv) == 1 and argv[0] == '--help')\n ):\n print(self.__edit_tool_help(result.stdout))\n else:\n # Call the tool\n result = subprocess.run([tool_path] + argv)\n if result.returncode != 0:\n sys.exit(result.returncode)\n\n except KeyboardInterrupt:\n # it lets the subprocess to handle the exception\n pass\n\n except BaseException as e:\n self.__help_message += str(e)\n self.__help_message += '\\n fast-discovery-server tool not found!'\n print(self.__help_message)\n sys.exit(1)", "def __init__(self, bond, deviceId, device, properties):\n self._bond = bond\n self._deviceId = deviceId\n self._device = device\n self._properties = properties\n self._name = device['name']\n self._state = None\n self._speed_list = []\n self._speed_name_by_value = {}\n self._attributes = {}\n\n if Actions.SET_SPEED in self._device['actions']:\n if 'max_speed' in self._properties:\n self._speed_high = int(self._properties['max_speed'])\n self._speed_low = int(1)\n self._speed_list.append(SPEED_LOW)\n self._speed_name_by_value[self._speed_low] = SPEED_LOW\n if self._speed_high > 2:\n self._speed_medium = (self._speed_high + 1) // 2\n self._speed_list.append(SPEED_MEDIUM)\n self._speed_name_by_value[self._speed_medium] = SPEED_MEDIUM\n self._speed_list.append(SPEED_HIGH)\n self._speed_name_by_value[self._speed_high] = SPEED_HIGH", "def initialize_interface(self):\n self.args, self.phil_args = parse_command_args(self.iver,\n self.help_message).parse_known_args()\n ginp = InputFinder()\n\n # Check for type of input\n if not self.args.path: # No input\n 
parse_command_args(self.iver, self.help_message).print_help()\n if self.args.default: # Write out default params and exit\n help_out, txt_out = inp.print_params()\n print('\\n{:-^70}\\n'.format('IOTA Parameters'))\n print(help_out)\n return False, 'IOTA_XTERM_INIT: OUTPUT PARAMETERS ONLY'\n elif len(self.args.path) > 1: # If multiple paths / wildcards\n file_list = ginp.make_input_list(self.args.path)\n list_file = os.path.join(os.path.abspath(os.path.curdir), 'input.lst')\n with open(list_file, 'w') as lf:\n lf.write('\\n'.join(file_list))\n msg = \"\\nIOTA will run in AUTO mode using wildcard datapath:\\n\" \\\n \"{} files found, compiled in {}\\n\".format(len(file_list), list_file)\n self.iota_phil = inp.process_input(self.args, self.phil_args, list_file,\n 'auto', self.now)\n self.params = self.iota_phil.extract()\n\n else: # If single path, check type\n carg = os.path.abspath(self.args.path[0])\n if os.path.isfile(carg):\n ptype = ginp.get_file_type(carg)\n if ptype.lower() in ('raw image', 'image pickle'):\n msg = \"\\nIOTA will run in SINGLE-FILE mode using {}:\\n\".format(carg)\n mode = 'auto'\n elif ('iota' and 'settings' in ptype.lower()):\n msg = '\\nIOTA will run in SCRIPT mode using {}:\\n'.format(carg)\n mode = 'file'\n elif 'list' in ptype.lower():\n msg = \"\\nIOTA will run in AUTO mode using {}:\\n\".format(carg)\n mode = 'auto'\n else:\n pr = 'WARNING! File format not recognized. Proceed anyway? [Y/N] '\n unknown_file = raw_input(pr)\n if 'y' in unknown_file.lower():\n ftype = raw_input(\"File type? [image, list, or parameters] \")\n msg = \"\\nIOTA will run WITH DODGY input using {}:\\n\".format(carg)\n if 'par' in ftype:\n mode = 'file'\n else:\n mode = 'auto'\n else:\n print('Exiting...')\n return False, 'IOTA_XTERM_INIT_ERROR: Unrecognizable input!'\n elif os.path.isdir(carg):\n ptype = ginp.get_folder_type(carg)\n if ('image' and 'folder' in ptype.lower()):\n msg = \"\\nIOTA will run in AUTO mode using {}:\\n\".format(carg)\n mode = 'auto'\n else:\n msg = \"IOTA_XTERM_INIT_ERROR: No images in {}!\".format(carg)\n print(self.logo)\n print(msg)\n return False, msg\n\n # If user provided gibberish\n else:\n msg = \"IOTA_XTERM_INIT_ERROR: Invalid input! 
Need parameter filename \" \\\n \"or data folder.\"\n print(self.logo)\n print(msg)\n return False, msg\n\n # Initialize parameters for this command-line run\n self.iota_phil = inp.process_input(self.args, self.phil_args,\n carg, mode, self.now)\n self.params = self.iota_phil.extract()\n\n # Identify indexing / integration program and add to logo\n b_end = \" with {}\".format(str(self.params.advanced.processing_backend).upper())\n prg = \"{:>{w}}\".format(b_end, w=76)\n self.logo += prg\n print(self.logo)\n print('\\n{}\\n'.format(self.now))\n if msg != '':\n print(msg)\n\n if self.args.analyze is not None:\n print('ANALYSIS ONLY will be performed (analyzing run #{})'.format(\n self.args.analyze))\n self.analyze_prior_results('{:003d}'.format(int(self.args.analyze)))\n return False\n\n if self.params.mp.method == 'mpi':\n rank, size = get_mpi_rank_and_size()\n self.master_process = rank == 0\n else:\n self.master_process = True\n\n # Call function to read input folder structure (or input file) and\n # generate list of image file paths\n\n with prog_message(\"Reading input files\"):\n self.input_list = self.make_input_list()\n\n # Select range of images/objects if turned on\n if self.params.advanced.image_range.flag_on:\n self.input_list = self.select_image_range(self.input_list)\n\n # Pick a randomized subset of images/objects if turned on\n if self.params.advanced.random_sample.flag_on and \\\n self.params.advanced.random_sample.number < len(self.input_list):\n with prog_message(\"Selecting {} random images out of {} found\"\n \"\".format(self.params.advanced.random_sample.number,\n len(self.input_list))):\n self.input_list = self.select_random_subset(self.input_list)\n\n # Check for -l option, output list of input files and exit\n if self.args.list:\n list_file = os.path.abspath(\"{}/input.lst\".format(os.curdir))\n\n # Check if other files of this name exist under the current folder\n list_folder = os.path.dirname(list_file)\n list_files = [i for i in os.listdir(list_folder) if i.endswith(\".lst\")]\n if len(list_files) > 0:\n list_file = os.path.join(list_folder,\n \"input_{}.lst\".format(len(list_files)))\n\n msg = 'IOTA_XTERM_INIT: INPUT LIST ONLY option selected'\n print ('\\n{}'.format(msg))\n print ('Input list in {} \\n\\n'.format(list_file))\n with open(list_file, \"w\") as lf:\n for i, input_file in enumerate(self.input_list, 1):\n lf.write('{}\\n'.format(input_file))\n print (\"{}: {}\".format(i, input_file))\n lf.write('{}\\n'.format(input_file))\n print ('\\nExiting...\\n\\n')\n return False, msg\n\n return True, 'IOTA_XTERM_INIT: Initialization complete!'", "def Demo():\n args = _Parse()\n device = args.device.lower()\n if device == 'keyboard':\n DemoBluetoothHIDKeyboard(args.remote_host_address, args.chars_to_send)\n elif device == 'mouse':\n DemoBluetoothHIDMouse(args.remote_host_address)\n else:\n args.print_help()", "def init_command_objects(self):\n super().init_command_objects()\n\n device_args = (\n self.component_manager,\n self.op_state_model,\n self.obs_state_model,\n self.logger,\n )\n self.register_command_object(\n \"ConfigureScan\", self.ConfigureScanCommand(*device_args)\n )\n self.register_command_object(\"GoToIdle\", self.GoToIdleCommand(*device_args))", "def __init__(self, device_id):\n info_pointer = pm.lib.Pm_GetDeviceInfo(device_id)\n if not info_pointer:\n raise IOError('PortMidi device with id={} not found'.format(\n device_id))\n info = info_pointer.contents\n \n self.device_id = device_id\n self.interface = info.interface.decode('utf-8')\n self.name 
= info.name.decode('utf-8')\n self.is_input = info.is_input\n self.is_output = info.is_output\n self.opened = bool(info.opened)", "def make_parser():\n\n parser = ArgumentParser(description=\"Create dummy sensor stream esque data\")\n parser.add_argument('--tuples-per-emit', '-t', type=int, default=1,\n help='number of tuples to emit at once')\n parser.add_argument('--sensors', '-s', type=int, default=1,\n help='number of sensors to generate')\n\n return parser", "def __init__(self, pid, init=None):\n self.__pid = pid\n\n cmd_handler = Cmd_Handler(lambda x: x['type'])\n cmd_handler.register_cmd('build', self.decide_build)\n cmd_handler.register_cmd('move', self.decide_move)\n self.__cmd_handler = cmd_handler\n\n self.__opponent = None\n self.look_ahead = 0\n\n self.__state = None\n\n if init:\n self.init_state(init)", "def __init__(self, device=None):\n self.logger = logging.getLogger(self.__class__.__name__)\n self.host = \"localhost\"\n if device is None:\n from droidbot.device import Device\n device = Device()\n self.device = device\n self.port = self.device.get_random_port()\n self.connected = False\n self.__can_wait = True\n\n self.sock = None\n self.last_acc_event = None\n self.enable_accessibility_hard = device.enable_accessibility_hard\n self.ignore_ad = device.ignore_ad\n if self.ignore_ad:\n import re\n self.__first_cap_re = re.compile(\"(.)([A-Z][a-z]+)\")\n self.__all_cap_re = re.compile(\"([a-z0-9])([A-Z])\")", "def load_device():", "def setup(args):\n print(\"[INFO] args:\", json.dumps(args))\n\n token = args['_parameters'].get('token')\n device_type = args['_parameters'].get('device_type')\n\n if not token:\n print(\"[ERROR] Ubidots token not specified\")\n return {\"status\":\"error\"}\n\n elif not device_type and token:\n print(\"[INFO] device type not specified\")\n device_type = \"\"\n\n if device_type != \"\":\n device_type_data = set_device_type(device_type)\n try:\n res = create_device_type(device_type_data, token)\n print(res)\n if res.status_code == 409:\n print(\"[INFO] A device type with this name already exists.\")\n elif res.status_code == 201:\n print(\"[INFO] Device type created successfully.\")\n except Exception as e:\n print(\"[INFO] Setup function ran, but could not create a device type.\")\n print(e)\n else:\n print({\"[INFO] No device type created\"})\n\n return {\"status\":\"finished\"}", "def init_args():\n parser = argparse.ArgumentParser(\n description=\"DeltaSherlock Client software.\")\n parser.add_argument('-v', '--version', action='version', version=VERSION)\n parser.add_argument('-c', '--config', action='store', dest='config_file',\n default='./config.ini', help=\"Path to config file. [default: \\\n %(default)s]\")\n parser.add_argument('-d', '--daemon', action='store_true', dest='daemon',\n default=False, help=\"Run in daemon mode. 
[default: \\\n %(default)s]\")\n return parser.parse_args()", "def fill_args(args):\n args.agent_module = 'dstar_sgolam_walker'\n args.checkpoint_path = None\n args.exp_config = 'configs/baselines/dstar_proto_sgolam.yaml'\n args.num_episodes = 25\n \n return args", "def _setup_parser():\n parser = argparse.ArgumentParser(add_help=True)\n parser.add_argument('--eval_model', type=str, default=None)\n parser.add_argument('--stack', type=int, default=1)\n parser.add_argument('--flare', action='store_true')\n parser.add_argument('--mixreg', action='store_true')\n\n env_group = parser.add_argument_group(\"Env Args\")\n env_group.add_argument('--env_name', type=str, default=ENV_NAME)\n env_group.add_argument('--num_envs', type=int, default=NUM_ENVS)\n env_group.add_argument('--num_levels', type=int, default=NUM_LEVELS)\n env_group.add_argument('--start_level', type=int, default=START_LEVEL)\n\n agent_group = parser.add_argument_group(\"Agent Args\")\n PPOAgent.add_to_argparse(agent_group)\n\n model_group = parser.add_argument_group(\"Model Args\")\n ImpalaPPO.add_to_argparse(model_group)\n\n return parser", "def __init__(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs\n\n config = kwargs.get(\"config\", kwargs)\n self.connection_type = config.get(\"connection_type\", None)\n self.connection = connection_decider.connection(device=self,\n conn_type=self.connection_type,\n **kwargs)\n self.connection.connect()\n self.consoles = [self]\n super(PrplMeshStation, self).__init__(*args, **kwargs)\n self.iface_dut = self.iface_wifi = self.kwargs.get(\n 'iface', 'wlan0')\n self.driver_name = config.get(\"driver\", \"nl80211,wext\")\n self.mac = self.get_mac()\n\n # kill all wpa_supplicant relevant to active interface\n self.wifi_disconnect()\n # Turn on and off wlan iface just in case\n self.disable_and_enable_wifi()", "def __init__(self, name: str, hw_device: KnauerDAD, channel: int):\n super().__init__(name, hw_device)\n self.channel = channel\n\n # additional parameters\n self.add_api_route(\"/set-wavelength\", self.set_wavelength, methods=[\"PUT\"])\n self.add_api_route(\n \"/set-integration-time\", self.set_integration_time, methods=[\"PUT\"]\n )\n self.add_api_route(\"/set-bandwidth\", self.set_bandwidth, methods=[\"PUT\"])\n\n # Ontology: diode array detector\n self.metadata.owl_subclass_of = \"http://purl.obolibrary.org/obo/CHMO_0002503\"", "def parser_setup():\n ap = argparse.ArgumentParser(description=__doc__)\n ap.add_argument(\"-c\", \"--config-dir\", default=\".\",\n help=\"Configuration directory. Contains YAML configuration\"\n \"files.\")\n ap.add_argument(\"-v\", \"--verbose\", action=\"count\", default=1,\n help=\"Print copious debugging info.\")\n ap.add_argument(\"-q\", \"--quiet\", action=\"count\", default=0,\n help=\"Suppress output. 
-qq to suppress ALL output.\")\n ap.add_argument(\"-p\", \"--profile\", default=\"all\",\n help=\"Dashboard profile to load from dashdef.yml\")\n ap.add_argument(metavar=\"HOST\", nargs=\"*\", dest=\"host_globs\",\n help=\"Host glob.\")\n return ap", "def __init__(self,model,device):\n self.model = model\n self.device = device", "def __init__(self, config_parser, **kwargs):\n BaseAgent.__init__(self, config_parser)\n\n self.SERVICE_ID = config_parser.get('agent', 'SERVICE_ID')\n self.GENERIC_DIR = config_parser.get('agent', 'CONPAAS_HOME')\n self.VAR_CACHE = config_parser.get('agent', 'VAR_CACHE')\n self.CODE_DIR = join(self.VAR_CACHE, 'bin')\n self.VOLUME_DIR = '/media'\n self.env = {}\n self.processes = {}", "def fill_args(cls, toolchain, parser):\n pass # pass must be overloaded (if required)", "def init(self, arguments):\n url = arguments['<location>']\n if url:\n name = arguments['<name>']\n else:\n url = arguments['<name>']\n name = None\n version = arguments['--box-version']\n instance_name = arguments['--name']\n force = arguments['--force']\n requests_kwargs = utils.get_requests_kwargs(arguments)\n\n if os.path.exists('Mechfile') and not force:\n puts_err(colored.red(textwrap.fill(\n \"`Mechfile` already exists in this directory. Remove it \"\n \"before running `mech init`.\"\n )))\n return\n\n puts_err(colored.green(\"Initializing mech\"))\n if utils.init_mechfile(instance_name, url, name=name, version=version, requests_kwargs=requests_kwargs):\n puts_err(colored.green(textwrap.fill(\n \"A `Mechfile` has been initialized and placed in this directory. \"\n \"You are now ready to `mech up` your first virtual environment!\"\n )))\n else:\n puts_err(colored.red(\"Couldn't initialize mech\"))", "def __init__(self, dataset_dir=None, sensor_data=None, output_dir=None):\n\n self.dataset_dir = None\n self.sensor_data = None\n self.output_dir = None\n self.plot_dir = None\n self.matchup_dataset = None\n self.HarmData = None\n self.hout_path = None\n self.hres_paths = None\n\n if (dataset_dir is not None) and (sensor_data is not None):\n self.dataset_dir = dataset_dir\n self.sensor_data = sensor_data\n self.output_dir = output_dir\n self.plots_dir = pjoin(output_dir, \"plots\")\n try:\n makedirs(self.plots_dir)\n except OSError:\n pass", "def create_from_args(cls, settings):\n loader = cls()\n\n if settings.hdu_index is not None:\n # Note: semantically we treat `hdu_index = 2` differently than\n # `hdu_index = [2]`: the first means use HDU 2 in all files, the\n # second means use HDU 2 in the first file. 
But we don't have a way\n # to distinguish between the two when parsing the CLI argument.\n try:\n index = int(settings.hdu_index)\n except ValueError:\n try:\n index = list(map(int, settings.hdu_index.split(\",\")))\n except Exception:\n raise Exception(\n \"cannot parse `--hdu-index` setting `{settings.hdu_index!r}`: should \"\n \"be a comma-separated list of one or more integers\"\n )\n\n loader.hdu_index = index\n\n if settings.blankval is not None:\n try:\n # If integer input image, want to avoid roundoff/precision issues\n v = int(settings.blankval)\n except ValueError:\n try:\n v = float(settings.blankval)\n except ValueError:\n raise Exception(\n \"cannot parse `--blankval` setting `{settings.blankval!r}`: should \"\n \"be a number\"\n )\n\n loader.blankval = v\n\n return loader", "def __init__(self, name, host, token):\n self._name = name or DEVICE_DEFAULT_NAME\n self.host = host\n self.token = token\n self._vacuum = None\n self._state = None\n self._state = None\n self._state_attrs = {}\n self._is_on = False" ]
[ "0.62113965", "0.608058", "0.59696805", "0.5943475", "0.59068257", "0.583213", "0.5805206", "0.5683287", "0.5648386", "0.56437355", "0.56389016", "0.56138045", "0.5604826", "0.5592298", "0.5573388", "0.55678135", "0.55641365", "0.5561535", "0.55565435", "0.5537574", "0.55369824", "0.5531317", "0.55162776", "0.5510179", "0.548618", "0.5464697", "0.54606086", "0.5439942", "0.54372567", "0.54372376", "0.54194885", "0.5417041", "0.5415447", "0.541042", "0.5403725", "0.5402059", "0.54018825", "0.5397107", "0.5393879", "0.5391242", "0.53831536", "0.53830075", "0.53787035", "0.53770065", "0.53761077", "0.5373902", "0.53683907", "0.533738", "0.5335584", "0.53312606", "0.532958", "0.53284955", "0.53279835", "0.5323961", "0.5312514", "0.53124714", "0.53031904", "0.52945656", "0.5293814", "0.52924275", "0.5288758", "0.5279952", "0.527701", "0.5276951", "0.5273599", "0.5271894", "0.525352", "0.5250651", "0.5249311", "0.5240174", "0.523878", "0.5228979", "0.5228237", "0.52274245", "0.5227141", "0.52255297", "0.5213254", "0.5209999", "0.5208703", "0.5208123", "0.5202126", "0.5200097", "0.5196929", "0.5194344", "0.5185913", "0.51849663", "0.51849335", "0.5183552", "0.51788646", "0.51774603", "0.51771355", "0.5173373", "0.5171911", "0.5166883", "0.51608545", "0.5159364", "0.5156187", "0.515346", "0.51520175", "0.51474243" ]
0.6631796
0
Override this method to initialize the simulation.
def make_simulation(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize_simulation(self) -> Simulation:\n pass", "def initialise_sim(self):\n pass", "def _setup_simulation(self\n ) -> None:\n pass", "def __init__(self, simulator):\r\n self.initialize(simulator)", "def setup_simulation(self, **kwargs):\n\n self.distance = self.config[\"site\"][\"distance\"]\n self.num_substations = self.config[\"num_substations\"]\n\n self.initialize_substructure_production()\n self.initialize_installation_vessel()", "def initialize(self):\n self._setup_simulation_from_parameters()\n if \"orrb\" in self.constants.observation_providers:\n self._reset()\n self._goal = self._next_goal()\n self.update_goal_info()\n\n self.observer = self._build_observer()", "def startSimulation(self):\n self.saveParameters()\n self.simulation.main()", "def __init__(self) -> None:\n self.simulation = None\n self.update_time = 0.1\n self.time = None\n self.config = None", "def __init__(self):\n print(\"Initializing system...\"),\n for i in range(0,self.numAtoms):\n self.atoms.append(Atom())\n self.assignPositions()\n self.applyBoltzmannDist()\n self.correctMomenta()\n print(\"done.\")\n print(\"Simulation is now running.\")", "def initialize(self) -> None:\n self.simulation = self.initialize_simulation()\n width, height = get_window_resolution()\n display_dim = ((0, width), (0, height))\n self.coord_mapper = CoordinateMapper2D(*self.simulation.dim, *display_dim)\n self.simple_pygame.all_sprites.empty()\n self.initialize_visualization()", "def setUpClass(cls):\n np.random.seed(2019)\n # So the 1 st row of the first random number array, random.rand(500, 3)\n # will be [0.90348221, 0.39308051, 0.62396996]\n # Accordingly, the first row of\n # coordinates = (0.5 - np.random.rand(500, 3)) * box_length\n # should be [-3.31690899, 0.87895379, -1.01912071]\n cls.sys_obj = monte_carlo.SystemSetup()\n cls.energy = energy.Energy()\n cls.parser = monte_carlo.initialize()\n cls.sim = monte_carlo.MonteCarlo(\n cls.sys_obj, cls.energy, cls.parser)\n np.random.seed()", "def __init__(self,simulation_manager):\n self.simulation_manager = simulation_manager", "def __init__(self):\n self._max_sim_time_reached = False\n self._max_wall_time_reached = False\n self._behavior_finished = False\n self._flexbe_status_subscriber = None\n\n self._mission_finalizers = \"\"\n self._mission_sim_time_in_sec = 0\n self._finalizer_functions = []\n\n self.read_ros_params()\n CiLog.info(\"Init of SimulationControl constructor finished.\")", "def experiment_init(self):\n raise NotImplementedError(\"this needs to be implemented!\")", "def initialize(self):\n raise NotImplementedError", "def initialize(self):\n raise NotImplementedError", "def initialize(self):\n raise NotImplementedError", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def __init__(self):\n self.time_limit = None\n self.time_step = None\n self.robot = None\n self.humans = None\n self.global_time = None\n self.human_times = None\n # reward function\n self.discomfort_dist = None\n # simulation configuration\n self.config = None\n self.randomize_attributes = None\n self.square_width = None\n self.circle_radius = None\n self.human_num = None\n # for visualization\n self.states = None\n self.states_traj = None", "def initialize(self):\n\t\tpass", "def setUpClass(self):\n self.c = Simulation(logging_level=50)\n self.c.set_simulation_parameters(\n seed=1,\n task=32,\n output_directory=\"output\",\n min_speciation_rate=0.5,\n sigma=2,\n tau=2,\n 
deme=1,\n sample_size=0.1,\n max_time=10,\n dispersal_relative_cost=1,\n min_num_species=1,\n )\n self.c.set_map_files(\n sample_file=\"sample/SA_samplemaskINT.tif\",\n fine_file=\"sample/SA_sample_coarse.tif\",\n dispersal_map=\"sample/dispersal_fine.tif\",\n )\n self.c.run()", "def experiment_init(self):\n pass", "def initialize(self):\r\n pass", "def initialize(self):\r\n pass", "def setSimulation(self, simulation):\r\n raise NotImplementedError()", "def _initialize(self):\n self.send_init_command()", "def initialise(self):\n # Can take quite a lot of time due to the homing\n print(\"Initialising spectrograph.\")\n err = self._dll.ShamrockInitialize()\n self.status(\"Initialisation\", err)", "def initialize(self):\n return", "def initialize(self):\n logger.debug(\"Begin Generation\")\n self.events.begin_generation()", "def setUpClass(cls):\n cls.c = Simulation(logging_level=logging.CRITICAL)\n cls.c.set_simulation_parameters(\n seed=3,\n task=32,\n output_directory=\"output\",\n min_speciation_rate=0.5,\n sigma=2,\n tau=2,\n deme=1,\n sample_size=0.1,\n max_time=10,\n dispersal_relative_cost=1,\n min_num_species=1,\n )\n cls.c.set_map_files(\n sample_file=\"sample/SA_samplemaskINT.tif\",\n fine_file=\"sample/SA_sample_coarse.tif\",\n dispersal_map=\"sample/dispersal_fine_nodata.tif\",\n )\n cls.c.run()", "def _setup(self) -> None:\n # Call base implementation\n super()._setup()\n\n # Configure the low-level integrator\n engine_options = self.simulator.engine.get_options()\n engine_options[\"stepper\"][\"iterMax\"] = 0\n engine_options[\"stepper\"][\"dtMax\"] = min(0.02, self.step_dt)\n engine_options[\"stepper\"][\"logInternalStepperSteps\"] = False\n\n # Set maximum computation time for single internal integration steps\n if self.debug:\n engine_options[\"stepper\"][\"timeout\"] = 0.0\n else:\n engine_options[\"stepper\"][\"timeout\"] = 2.0\n\n # Enable logging of geometries in debug mode\n if self.debug:\n engine_options[\"telemetry\"][\"isPersistent\"] = True\n\n # Update engine options\n self.simulator.engine.set_options(engine_options)\n\n # Set robot in neutral configuration\n qpos = self._neutral()\n framesForwardKinematics(\n self.robot.pinocchio_model, self.robot.pinocchio_data, qpos)", "def setUpClass(cls):\n cls.c = Simulation(logging_level=logging.CRITICAL)\n cls.c.set_simulation_parameters(\n seed=2,\n task=32,\n output_directory=\"output\",\n min_speciation_rate=0.5,\n sigma=2,\n tau=2,\n deme=1,\n sample_size=0.1,\n max_time=10,\n dispersal_relative_cost=1,\n min_num_species=1,\n )\n cls.c.set_map_files(\n sample_file=\"sample/SA_samplemaskINT.tif\",\n fine_file=\"sample/SA_sample_coarse.tif\",\n dispersal_map=\"sample/dispersal_fine_cumulative.tif\",\n )\n cls.c.run()", "def init_run(self):\n raise NotImplementedError", "def setup_simulation(system, pdb, integrator):\n #platform = Platform.getPlatformByName('CPU')\n platform = Platform.getPlatformByName('OpenCL')\n prop = {'OpenCLPrecision':'single'}\n\n simulation = Simulation(pdb.topology, system, integrator, platform, prop)\n simulation.context.setPositions(pdb.positions)\n simulation.minimizeEnergy()\n simulation.context.setVelocitiesToTemperature(300*kelvin)\n print('Created simulation')\n return simulation", "def initialize(self) -> None:\n pass", "def initialize(self):\n\n # --------- BEGIN YOUR CODE ----------\n\n # This is exactly the same as Human.initialize, just copy the code over\n\n # --------- END YOUR CODE ----------\n pass", "def agent_init(self):\n pass", "def initialise(self):\r\n return", "def 
initialise(self):\r\n return", "def initialize(self, platform=None):\n\n if self._simulation is None:\n if type(platform) is str:\n self._simulation = openmm.app.Simulation(\n topology=self.topology.mdtraj.to_openmm(),\n system=self.system,\n integrator=self.integrator,\n platform=openmm.Platform.getPlatformByName(platform),\n platformProperties=self.openmm_properties\n )\n elif platform is None:\n self._simulation = openmm.app.Simulation(\n topology=self.topology.mdtraj.to_openmm(),\n system=self.system,\n integrator=self.integrator,\n platformProperties=self.openmm_properties\n )\n else:\n self._simulation = openmm.app.Simulation(\n topology=self.topology.mdtraj.to_openmm(),\n system=self.system,\n integrator=self.integrator,\n platform=platform,\n platformProperties=self.openmm_properties\n )\n\n logger.info(\n 'Initialized OpenMM engine using platform `%s`' %\n self.platform)", "def initialize(self):\r\n N = self.N\r\n self.mean = array(self.x0, copy=True)\r\n self.sigma = self.sigma0\r\n self.sigmai = np.ones(N)\r\n self.ps = np.zeros(N) # path for individual and globalstep-size(s)\r\n self.r = np.zeros(N)\r\n self.pr = 0 # cumulation for zr = N(0,1)\r\n self.sigma_r = 0", "def initialise(self):", "def setup_simulation(system, pdb, integrator):\n #platform = Platform.getPlatformByName('CPU')\n platform = Platform.getPlatformByName('OpenCL')\n prop = {'OpenCLPrecision':'single'}\n \n simulation = Simulation(pdb.topology, system, integrator, platform, prop)\n simulation.context.setPositions(pdb.positions)\n simulation.minimizeEnergy()\n simulation.context.setVelocitiesToTemperature(310*kelvin)\n print('Created simulation')\n return simulation", "def __init__(self, simulation: 'Simulation', time: datetime) -> None:\n self.simulation = simulation\n self.time = time", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def setup_simulation(system, pdb, integrator):\n #platform = Platform.getPlatformByName('CPU')\n platform = Platform.getPlatformByName('OpenCL')\n prop = {'OpenCLPrecision':'single'}\n \n simulation = Simulation(pdb.topology, system, integrator, platform, prop)\n simulation.context.setPositions(pdb.positions)\n simulation.minimizeEnergy(100)\n simulation.context.setVelocitiesToTemperature(310*kelvin)\n print('Created simulation')\n return simulation", "def Initialize(self):\n problem_data = self.project_parameters[\"problem_data\"]\n if problem_data.Has(\"start_time\"):\n warn_msg = 'Parameter TIME is used as load factor. \\n'\n warn_msg += 'Parameter \"start_time\" will be ignored!'\n KratosMultiphysics.Logger.PrintWarning(\"StructuralMechanicsPrebucklingAnalysis; Warning\", warn_msg)\n else:\n # Create dummy parameter\n aux_settings = KratosMultiphysics.Parameters(r\"\"\"{ \"start_time\" : 1.0 }\"\"\")\n problem_data.AddMissingParameters(aux_settings)\n\n if problem_data.Has(\"end_time\"):\n warn_msg = 'Parameter TIME is used as load factor. 
\\n'\n warn_msg += 'Parameter \"end_time\" will be ignored!'\n KratosMultiphysics.Logger.PrintWarning(\"StructuralMechanicsPrebucklingAnalysis; Warning\", warn_msg)\n else:\n # Create dummy paramter\n aux_settings = KratosMultiphysics.Parameters(r\"\"\"{ \"end_time\" : 1.0 }\"\"\")\n problem_data.AddMissingParameters(aux_settings)\n\n # Initialize super class\n super().Initialize()\n\n # Initialize solution stepping\n self.step = 0\n self.time = 1\n if not problem_data.Has(\"nsteps\"):\n raise Exception(\"StructuralMechanicsPrebucklingAnalysis: \" + 'Maximum number of steps \"nsteps\" must be provided\"!')\n else:\n self.nsteps = problem_data[\"nsteps\"].GetInt()\n\n ## If the echo level is high enough, print the complete list of settings used to run the simualtion\n if self.echo_level > 1:\n with open(\"ProjectParametersOutput.json\", 'w') as parameter_output_file:\n parameter_output_file.write(self.project_parameters.PrettyPrintJsonString())\n\n KratosMultiphysics.Logger.PrintInfo(self._GetSimulationName(), \"Analysis -START- \")", "def _initialise_run(self) -> None:", "def init(self):\n raise NotImplementedError", "def init(self):\n raise NotImplementedError", "def setUp(self):\n self.s = Simulation()\n self.s['Retina']=GeneratorSheet(nominal_density=4.0)\n self.s['V1']= CFSheet(nominal_density=4.0)\n self.s['V2'] = CFSheet(nominal_density=4.0)\n\n self.s.connect('Retina','V1',delay=0.5,connection_type=CFProjection,\n name='RtoV1',learning_fn=CFPLF_Hebbian())\n\n self.s.connect('Retina','V2',delay=0.5,connection_type=CFProjection,\n name='RtoV2',learning_fn=CFPLF_Hebbian())", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def __init__(self):\n\n self.read_input_file()\n self.read_simulation_files()", "def setUp(self) -> None:\n self.random = np.random.RandomState(seed=42)", "def __init__(self, simulation_params, starting_node_id, person_type=None):\n # import simulation parameters\n self.simulation_params = simulation_params\n self.env = simulation_params.get('simpy_env', None)\n self.dc = simulation_params.get('data_collector', None)\n self.routing = simulation_params.get('routing', None) \n self.time_interval = simulation_params.get('time_interval', None)\n\n # keep a record of person IDs\n self.PID = next(Person_base.get_new_id)\n\n # Routing is the list of environments that the person traverses\n self.routing_node_id = starting_node_id\n\n # Person type is a characteristic which affects behaviour in the microenvironment\n self.person_type = person_type", "def initialize(self): \r\n pass", "def init(self, parameters, agent_parameters):\n pass", "def initialise(self):\n self.set_up()", "def __init__(\n self, sequence: Sequence, config: EmulatorConfig = EmulatorConfig()\n ):\n super().__init__(sequence)\n if not isinstance(config, EmulatorConfig):\n raise TypeError(\n \"'config' must be of type 'EmulatorConfig', \"\n f\"not {type(config)}.\"\n )\n self._config = config\n self._sim_obj = QutipEmulator.from_sequence(\n sequence,\n sampling_rate=self._config.sampling_rate,\n config=SimConfig.from_noise_model(self._config.noise_model),\n evaluation_times=self._config.evaluation_times,\n with_modulation=self._config.with_modulation,\n )\n self._sim_obj.set_initial_state(self._config.initial_state)", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def do_init(self):\n\n pass", "def initialisation(self):\n self.create_variables()\n self.create_placeholders()\n self.build_model()\n 
self.reset_lr(None, True)\n self.build_loss()\n self.initialised = True", "def __init__(self):\n\n # Set the seed to 0\n random.seed(0)", "def __init__(self):\n self.robot = None\n self.humans = None\n self.global_time = None\n self.human_times = None\n # Simulation configuration\n self.config = None\n self.time_limit = None\n self.time_step = None\n self.end_on_collision = True\n self.side = None\n self.pixel_side = None\n self.closed = None\n self.goal_radius = None\n self.max_humans = None\n self.min_humans = None\n self.human_num_mode = None\n self.human_num = None\n self.perpetual = None\n self.rotate_path = None\n self.randomize_attributes = None\n self.square_width = None\n self.circle_radius = None\n # Reward function\n self.success_reward = None\n self.collision_penalty = None\n self.discomfort_dist = None\n self.discomfort_scale = None\n self.discomfort_penalty_factor = None\n self.group_discomfort_penalty = None\n self.time_penalty = None\n self.progress_reward = None\n self.initial_distance = None\n self.previous_distance = None\n # Internal environment configuration\n self.case_capacity = None\n self.case_size = None\n self.case_counter = None\n self.parallel = None\n self.max_tries = None\n self.train_val_sim = None\n self.test_sim = None\n # For visualization\n self.force_list = [\n \"desired_force\",\n \"social_force\",\n \"obstacle_force\",\n \"group_coherence_force\",\n \"group_repulsive_force\",\n \"group_gaze_force\",\n ] # TODO Configure this?\n self.forces = None\n self.states = None\n self.action_values = None\n self.attention_weights = None\n # For information return\n self.obs_history = np.array([])\n self.episode_info = dict()\n self.movie_file = \"\"\n\n self.scene_manager = None\n self.use_groups = None\n self.min_group_num = None\n self.max_group_num = None\n self.centralized_planning = None\n self.centralized_planner = None\n\n self.enable_intent = None\n self.intent_type = None\n\n self.obstacles = [] # xmin,xmax,ymin,ymax\n\n self.app = None", "def setup_class(cls):\n\n # set the base directory\n cls.basedir = os.path.join(os.path.split(os.path.realpath(__file__))[0], \"base\")\n\n cls.ninj = 50 # number of simulated signals\n cls.maxamp = 5e-23 # maximum amplitude\n cls.freqrange = (10.0, 100.0) # frequency range\n\n # default prior dictionary\n cls.priors = {}\n cls.priors[\"h0\"] = bilby.core.prior.Uniform(\n name=\"h0\", minimum=0.0, maximum=1e-22\n )", "def __init__( self ):\n self._env = None\n self._steps = None\n\n self._initialize( )", "def initialize(self):\n self.initialize_edges()\n self.initialize_prob()\n self.initialize_total_input_dict()\n\n self.initialize_fpmusigv_dict()", "def initialize(self):\n pass # pragma: no cover", "def initialize(self, simulator):\r\n self.__eventHandlers = {}\r\n self.__simulator = simulator\r\n self.__threads = {}", "def __init__(self, simulation):\n\n self.sim = simulation # A starting simulation with default values\n\n self.master = Tk()\n self.master.title(\"Primedice Simulator\")\n\n self.balance_label, self.balance_input, self.balance_str = \\\n self.make_balance_input()\n self.base_bet_label, self.base_bet_input, self.base_bet_str = \\\n self.make_base_bet_input()\n self.payout_label, self.payout_input, self.payout_str = \\\n self.make_payout_input()\n\n self.iterations_label, self.iterations_input, self.iterations_str = \\\n self.make_iterations_input()\n self.loss_adder_label, self.loss_adder_input, self.loss_adder_str = \\\n self.make_loss_adder_input()\n\n self.run_button = self.make_run_button()\n\n 
self.progress_label, self.progress_bar = self.make_progress_bar()\n\n self.graph_fig = self.make_graph()\n self.sim_results = None # Placeholder for when results come in\n\n self.master.mainloop()", "def initialize(self, seed=None):\r\n self.seed(seed)", "def setUpClass(cls):\n cls.sim = Simulation(logging_level=40)\n cls.sim.set_simulation_parameters(\n seed=1, task=45, output_directory=\"output\", min_speciation_rate=0.01, sigma=1, deme=40, sample_size=0.25\n )\n cls.sim.set_map(\"null\", 10, 10)\n cls.sim.add_sample_time([0, 0.01, 0.02, 0.03])\n cls.sim.run()\n cls.coal = CoalescenceTree(cls.sim)", "def __init__(self):\n \n smach.State.__init__(self, \n outcomes=['GoToNormal','GoToPlay'])\n \n self.rate = rospy.Rate(200) # Loop at 50 Hz", "def __init__(self):\n self.setupLogger()\n self.revConnect()\n #rev connect must happen FIRST\n super(APISTUB, self).__init__()\n\n self.globalData = CGlobalVariables()\n \n self.iSimulatorCount = 10\n self.lstSimulators = []\n \n for i in xrange(self.iSimulatorCount):\n simulator = SIMULATER(i, self.globalData)\n self.lstSimulators.append(simulator)", "def __init__(self):\n\n smach.State.__init__(self, \n outcomes=['GoToNormal','GoToSleep'])\n \n self.rate = rospy.Rate(200) # Loop at 50 Hz", "def __init__(self, simulation_attributes):\n for attr in ['locations','dprime_fnc','next_fixation',\n 'threshold', 'num_of_searches']:\n if getattr(simulation_attributes,attr) is None:\n assert False, (\n \"Precondition violation: none attribute in simulation_attributes \"\n + attr\n )\n if not isinstance(simulation_attributes, SimulationAttributes):\n raise TypeError(\n \"The argument isn't an instance of SimulationAttributes class\"\n )\n self.senzory_map = self._locations_to_senzory_map(\n simulation_attributes.locations\n )\n self.number_of_locs = self.senzory_map.shape[0]\n self.dprime_fnc = simulation_attributes.dprime_fnc\n self.dprime_map = generate_dprime_map(self.dprime_fnc,self.senzory_map)\n self.next_fixation = simulation_attributes.next_fixation\n self.threshold = simulation_attributes.threshold\n self.num_of_searches = simulation_attributes.num_of_searches", "def setUp(self):\n\n self.veh = Vehicle(0, 0)\n self.R = Random(seed)", "def __init__(self, simulator, filename=None):\n\n data = {} if filename is None else JsonUtils.read_file(filename)\n if data == {}:\n self.logger.warning(\"The config is empty. 
You may have a problem with your config file.\")\n # Simulation parameters\n self.name = data[\"name\"] if \"name\" in data else \"\"\n self.sim_speed = data[\"sim_speed\"] if \"sim_speed\" in data else 1.0\n self.logger_name = data[\"logger_name\"] if \"logger_name\" in data else \"INFO\"\n self.logger = logging.Logger(self.logger_name)\n self.exit_condition = data[\"exit_condition\"] if \"exit_condition\" in data else \"self.body.config.n_iter > 500\"\n self.timeout = data[\"timeout\"] if \"timeout\" in data else 10\n self.simulator = simulator\n self.t_init = 0\n self.t_end = 0\n self.n_iter = 0\n\n # Physical parameters\n self.body = data[\"body\"] if \"body\" in data else dict()\n self.legs = data[\"legs\"] if \"legs\" in data else []\n self.brain = data[\"brain\"] if \"brain\" in data else dict()\n self.connection_matrix = data[\"connection_matrix\"] if \"connection_matrix\" in data else dict()\n if self.connection_matrix == dict():\n self.config_connection_matrix()\n self.dist_ref = data[\"dist_ref\"] if \"dist_ref\" in data else 20\n self.power_ref = data[\"dist_ref\"] if \"dist_ref\" in data else 1000", "def start(self):\n self.__init__()\n self.set_n_players()\n self.init_players()\n self.init_territory_selection_phase()\n self.init_troop_deployment_phase()\n # self.game_phase()", "def initialize(self):\n self.write_model(path=PATH.GRAD, suffix='new')\n\n if PAR.RANDOM_OVER_IT or optimize.iter == 1:\n self.get_random_frequencies()\n\n print('Generating synthetics')\n system.run('solver', 'eval_func',\n hosts='all',\n path=PATH.GRAD)\n\n self.write_misfit(path=PATH.GRAD, suffix='new')", "def init(self) -> None:\n ...", "def initialize(self):\n self.population.initialize()\n self.cache.initialize()\n if self.storage:\n self.storage.initialize()", "def setUp(self):\n lang = self._sim_lang\n self._simulator = self._find_resource(\n f\"drake/examples/hardware_sim/hardware_sim_{lang}\")\n self._example_scenarios = self._find_resource(\n \"drake/examples/hardware_sim/example_scenarios.yaml\")\n self._test_scenarios = self._find_resource(\n \"drake/examples/hardware_sim/test/test_scenarios.yaml\")\n self._default_extra = {\n # For our smoke test, exit fairly quickly.\n \"simulation_duration\": 0.0625,\n }", "def initialize(self,t0=0.0):\n \n # An connection_distribution_list (store unique connection(defined by weight,syn,prob))\n self.connection_distribution_collection = ConnectionDistributionCollection() # this is \n self.t = t0\n \n # put all subpopulation and all connections into the same platform\n for subpop in self.population_list:\n subpop.simulation = self\n for connpair in self.connection_list:\n connpair.simulation = self\n \n \n \n # initialize population_list, calculate \n \n \n for p in self.population_list:\n p.initialize() # 2 \n \n for c in self.connection_list:\n print 'initialize population'\n c.initialize() # 1" ]
[ "0.8976463", "0.8452447", "0.83930016", "0.7892265", "0.7457877", "0.73970324", "0.7365948", "0.73107266", "0.7242487", "0.71671295", "0.7144739", "0.7124746", "0.7005026", "0.6902338", "0.68967086", "0.68967086", "0.68967086", "0.68929464", "0.68929464", "0.68929464", "0.68929464", "0.68929464", "0.68868893", "0.68862385", "0.68827593", "0.6875647", "0.6844476", "0.6844476", "0.68427014", "0.68329495", "0.68260276", "0.6815797", "0.67945707", "0.6764792", "0.6756723", "0.67553115", "0.6746402", "0.67268217", "0.6689939", "0.66882133", "0.66389656", "0.6635758", "0.6635758", "0.6629959", "0.6628214", "0.6623011", "0.66198665", "0.66156644", "0.6613293", "0.6613293", "0.6613293", "0.6613293", "0.6613293", "0.6613293", "0.6613293", "0.6613293", "0.6611263", "0.6604509", "0.6580604", "0.65652436", "0.65652436", "0.6565208", "0.6560187", "0.6560187", "0.6560187", "0.6558403", "0.6555551", "0.6550582", "0.65495116", "0.65351164", "0.65167624", "0.6506165", "0.6505827", "0.6505827", "0.6505827", "0.6505827", "0.64923686", "0.6488704", "0.6486994", "0.64850897", "0.6483085", "0.6482028", "0.6475837", "0.64738256", "0.64688414", "0.6462957", "0.64586115", "0.64584047", "0.64544", "0.64519733", "0.6450046", "0.64469314", "0.64448804", "0.6443818", "0.64273185", "0.6423641", "0.64160174", "0.64064693", "0.6400064", "0.63846946" ]
0.774349
4
Get the performance of the benchmark during the last ``run``.
def get_performance(self): return self.sim.tps
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def benchmark(self):\n logger.info(self.benchmark.__doc__)\n return self.run(self.benchmark_profile())", "def benchmark_result(self):\n return self._benchmark_id", "def retrieve( self, benchmark, extraLabel='' ):\n if benchmark.reference is ReferenceBenchmark.SP:\n idx = np.argmax( self.sps )\n else:\n # Get reference for operation:\n if benchmark.reference is ReferenceBenchmark.Pd:\n ref = self.pds\n elif benchmark.reference is ReferenceBenchmark.Pf:\n ref = self.pfs\n delta = ref - benchmark.refVal\n idx = np.argmin( np.abs( delta ) )\n return PerformancePoint( name=extraLabel + benchmark.name\n , sp=self.sps[ idx ]\n , pd=self.pds[ idx ]\n , pf=self.pfs[idx]\n , thres=self.thresholds[idx]\n )", "def execute(self):\n print_verbose_messages = (self.verbose\n and self.device.communicator.rank == 0)\n\n # Ensure that all ops are attached (needed for is_tuning_complete).\n self.run(0)\n\n if print_verbose_messages:\n print(f'Running {type(self).__name__} benchmark')\n\n if print_verbose_messages:\n print(f'.. warming up for {self.warmup_steps} steps')\n self.run(self.warmup_steps)\n\n if (isinstance(self.device, hoomd.device.GPU)\n and hasattr(self.sim.operations, 'is_tuning_complete')):\n while not self.sim.operations.is_tuning_complete:\n if print_verbose_messages:\n print('.. autotuning GPU kernel parameters for '\n f'{self.warmup_steps} steps')\n self.run(self.warmup_steps)\n\n if print_verbose_messages:\n print(f'.. running for {self.benchmark_steps} steps '\n f'{self.repeat} time(s)')\n\n # benchmark\n performance = []\n\n if isinstance(self.device, hoomd.device.GPU):\n with self.device.enable_profiling():\n for i in range(self.repeat):\n self.run(self.benchmark_steps)\n performance.append(self.get_performance())\n if print_verbose_messages:\n print(f'.. {performance[-1]} {self.units}')\n else:\n for i in range(self.repeat):\n self.run(self.benchmark_steps)\n performance.append(self.get_performance())\n if print_verbose_messages:\n print(f'.. 
{performance[-1]} {self.units}')\n\n return performance", "def measure(self):\n # --- perform repeated runs\n for i_run in range(self.n_runs):\n if self.verbosity > 0:\n print(\"Run {0} / {1} ...\".format(i_run, self.n_runs), end = '')\n tdelta = self._timed_execute()\n self._run_times[i_run] = tdelta\n\t\t\t\n if self.verbosity == 2:\n print(tdelta)\n \n # calculate mean\n self._tmean = np.mean(self._run_times)\n # calculate standard deviation\n self._tstdev = np.std(self._run_times)\n # allow access to results\n self.__hasrun = True", "def _get_time(self, state: State) -> int:\n benchmark_time = {\n 'resnet': state.timestamp.epoch.value,\n 'bert': state.timestamp.sample.value,\n }\n return benchmark_time[self.benchmark]", "def get_elapsed_time(self):\r\n self.get_bb_result()\r\n csv_path = self.bb_log_path + os.sep + 'run-logs' + os.sep + 'BigBenchTimes.csv'\r\n if not os.path.isfile(csv_path):\r\n print('BigBenchTimes.csv does not exist in {0}, existing...'.format(self.bb_log_path))\r\n exit(-1)\r\n df = pd.read_csv(csv_path, delimiter=';').loc[:,\r\n ['benchmarkPhase', 'streamNumber', 'queryNumber', 'durationInSeconds']]\r\n elapsed_time = pd.DataFrame()\r\n is_exist = False\r\n for phase in ['POWER_TEST', 'THROUGHPUT_TEST_1']:\r\n benchmark_phase = (df['benchmarkPhase'] == phase)\r\n if any(benchmark_phase): # whether this phase exist in the BB logs\r\n if phase == 'POWER_TEST': # power test overall and each query\r\n stream_num = ((df['streamNumber']) == 0)\r\n query_num = (pd.notnull(df['queryNumber']))\r\n mask = benchmark_phase & stream_num & query_num\r\n seconds = df[mask]['durationInSeconds'].values\r\n elapsed_time.insert(0, phase, seconds)\r\n elapsed_time.index = df[mask]['queryNumber'].astype('int64')\r\n elif phase == 'THROUGHPUT_TEST_1':\r\n streams = int(np.max(df['streamNumber']))\r\n for stream in range(streams + 1):\r\n stream_num = ((df['streamNumber']) == stream)\r\n query_num = (pd.notnull(df['queryNumber']))\r\n mask = benchmark_phase & stream_num & query_num\r\n seconds = df[mask]['durationInSeconds'].values\r\n elapsed_time.insert(stream + 1, 'stream{0}'.format(stream), seconds)\r\n elapsed_time.index = df[mask]['queryNumber'].astype('int64')\r\n is_exist = True\r\n if is_exist:\r\n print('*' * 100)\r\n print('Elapsed time of each query:\\n {0} \\n'.format(elapsed_time.to_string()))\r\n\r\n result_path = self.bb_log_path + os.sep + 'bb_results.log'\r\n with open(result_path, 'a') as f:\r\n f.write('*' * 100 + '\\n')\r\n f.write('Elapsed time of each query:\\n {0} \\n'.format(elapsed_time.to_string()))\r\n else:\r\n print('It seems BigBenchTimes.csv in {0} does not include TPCx-BB phases:POWER_TEST, THROUGHPUT_TEST_1' \\\r\n 'existing...'.format(self.bb_log_path))\r\n exit(-1)", "def last_run(self):\n return self._last_run", "def profile(self, layer, num_iter=50, num_warmup=10, direction='forward'):\n return TimeMeasure()", "def benchmark_it(self, with_gc):\n if self.run_sec is None:\n benchmark_result = self.src\n elif with_gc:\n gc_old = gc.isenabled()\n gc.enable()\n try:\n benchmark_result = self.inner()\n benchmark_result['name'] = self.name\n finally:\n if not gc_old:\n gc.disable()\n else:\n gc_old = gc.isenabled()\n gc.disable()\n try:\n benchmark_result = self.inner()\n benchmark_result['name'] = self.name\n finally:\n if gc_old:\n gc.enable()\n return benchmark_result", "def Run(benchmark_spec):\n _UpdateBenchmarkSpecWithFlags(benchmark_spec)\n vm = benchmark_spec.vms[0]\n if benchmark_spec.tpus:\n # For MLPerf 1.0, the benchmake code of different 
hardware are different.\n if (benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-32' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-128' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-256' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-512' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-1024' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-2048'):\n run_path = (\n '$HOME/training_results_{version}/Google/benchmarks/{model}/tpu-{tpus}'\n .format(\n version=VERSION.value,\n model=benchmark_spec.benchmark,\n tpus=benchmark_spec.tpu_groups['train'].GetAcceleratorType()))\n code_path = (\n '$HOME/training_results_{version}/Google/benchmarks/{model}/implementations/tpu-{tpus}-{model}'\n .format(\n version=VERSION.value,\n model=benchmark_spec.benchmark,\n tpus=benchmark_spec.tpu_groups['train'].GetAcceleratorType()))\n\n if MASK in benchmark_spec.benchmark:\n model = 'mask_rcnn'\n elif GNMT in benchmark_spec.benchmark:\n model = 'nmt'\n else:\n model = benchmark_spec.benchmark\n\n mlperf_benchmark_cmd = (\n 'cd {code_path} && '\n 'export PYTHONPATH=$(pwd):$(pwd)/{model} && '\n 'cd {model} && '\n '{run_path}/run_and_time.sh'.format(\n code_path=code_path,\n model=model,\n run_path=run_path))\n\n if SSD in benchmark_spec.benchmark:\n mlperf_benchmark_cmd = (\n 'export '\n 'MLP_GCS_RESNET_CHECKPOINT={checkpoint}'\n ' && {cmd}'.format(\n checkpoint=FLAGS.mlperf_gcs_resnet_checkpoint,\n cmd=mlperf_benchmark_cmd))\n else:\n raise ValueError(\n 'MLPerf configurations do not support the hardware in PKB. PKB may '\n 'need to be updated if this is a new TPU type.')\n\n else:\n run_sub_paths = {RESNET: 'resnet/implementations/mxnet',\n TRANSFORMER: 'transformer/implementations/pytorch',\n MINIGO: 'minigo/implementations/tensorflow',\n MASK: 'maskrcnn/implementations/pytorch',\n GNMT: 'gnmt/implementations/pytorch',\n SSD: 'ssd/implementations/pytorch',\n BERT: 'bert/implementations/pytorch',}\n benchmark_path = f'$HOME/training_results_{VERSION.value}/NVIDIA/benchmarks'\n run_path = posixpath.join(benchmark_path,\n run_sub_paths[benchmark_spec.benchmark])\n env = {\n 'DGXSYSTEM': DGXSYSTEM,\n 'NEXP': 1,\n 'PULL': 0,\n 'LOGDIR': f'/tmp/{benchmark_spec.benchmark}',\n }\n envs = {\n RESNET: {},\n TRANSFORMER: {'DATADIR': '/data/wmt/utf8'},\n MINIGO: {'CONT': 'mlperf-nvidia:minigo'},\n MASK: {},\n GNMT: {'DATADIR': '/data/gnmt'},\n SSD: {'DATADIR': '/data'},\n BERT: {}\n }\n env.update(envs[benchmark_spec.benchmark])\n\n run_script = posixpath.join(run_path, 'run_with_docker.sh')\n vm_util.ReplaceText(vm, 'SYSLOGGING=1', 'SYSLOGGING=0', run_script)\n vm_util.ReplaceText(vm, 'docker exec -it', 'docker exec -t', run_script)\n if benchmark_spec.benchmark == RESNET:\n vm_util.ReplaceText(vm, r'mpirun.*run_and_time\\.sh',\n r'.\\/run_and_time.sh', run_script)\n\n env = ' '.join(f'{key}={value}' for key, value in env.items())\n if nvidia_driver.CheckNvidiaGpuExists(vm):\n env = f'{tensorflow.GetEnvironmentVars(vm)} {env}'\n\n mlperf_benchmark_cmd = (\n f'chmod 755 {run_script} && '\n f'cd {run_path} && '\n f'{env} {run_script}')\n\n samples = []\n metadata = _CreateMetadataDict(benchmark_spec)\n stdout, _ = vm.RobustRemoteCommand(mlperf_benchmark_cmd)\n if NONE in FLAGS.mlperf_profiler:\n samples.extend(\n MakeSamplesFromOutput(\n metadata,\n stdout,\n use_tpu=bool(benchmark_spec.tpus),\n model=benchmark_spec.benchmark))\n return samples", "def get_run_stats(self):\n return self.run_stats", "def 
report_performance(self):\n performance = self.amygdala.visualize(self.timestep, \n self.name, \n self.log_dir)\n print('Final performance is {0:.3}'.format(performance))\n self.backup()\n return performance", "def get_aggregate_time(self, benchmark_index: int, kind: str) -> int:\n time = None\n for bench_case in self.benchmarks[benchmark_index].results:\n if bench_case[\"name\"].endswith(f\"real_time_{kind}\"):\n if bench_case[\"time_unit\"] != \"ms\":\n raise ValueError(f\"Expected ms as time unit\")\n time = int(round(bench_case[\"real_time\"]))\n break\n if time is None:\n raise ValueError(f\"Cannot found real_time_{kind} in benchmark results\")\n return time", "def benchmark_selection(self):\n return self._benchmark_selection", "def benchmark(self, **kwargs):\n num_iterations = kwargs.get(\"benchmark_iterations\")\n\n start_time = time.time()\n\n # store how far off we are\n deviations = []\n\n for _ in xrange(num_iterations):\n kwargs[\"roll\"] = decimal.Decimal(random.uniform(\n self.MIN_BENCHMARK_ROLL, self.MAX_BENCHMARK_ROLL))\n kwargs[\"pitch\"] = decimal.Decimal(random.uniform(\n self.MIN_BENCHMARK_PITCH, self.MAX_BENCHMARK_PITCH))\n\n _, deviation = self.find_closest_trajectory(**kwargs)\n deviations.append(deviation)\n\n # calculate results from the benchmarking\n total_time = time.time() - start_time\n average_time = total_time / num_iterations\n average_deviation = sum(deviations) / len(deviations)\n\n print \"AVERAGE TIME: %s AVERAGE DEVIATION: %s\" \\\n % (average_time, average_deviation)", "def runtime(self):\n return self.tmax_epochs - self.tmin_epochs", "def benchmark_profile(self):\n cb_bin = os.path.join(bin_path, 'compilebench')\n desc = \"benchmark\"\n test_name = \"compilebench_{0}\".format(to_safe_name(desc))\n test = TestProfile(\n name=test_name,\n desc=desc,\n test_path=self.test_path,\n bin_path=bin_path,\n command=\"{0} -D {1} -i 10 --makej\".format(cb_bin, self.test_path))\n\n return test", "def run_benchmark(curl, benchmark, test_config = TestConfig()):\n\n warmup_runs = benchmark.warmup_runs\n benchmark_runs = benchmark.benchmark_runs\n message = '' #Message is name of benchmark... 
print it?\n\n if (warmup_runs <= 0):\n raise Exception(\"Invalid number of warmup runs, must be > 0 :\" + warmup_runs)\n if (benchmark_runs <= 0):\n raise Exception(\"Invalid number of benchmark runs, must be > 0 :\" + benchmark_runs)\n\n #Initialize variables to store output\n output = BenchmarkResult()\n output.name = benchmark.name\n output.group = benchmark.group\n metricnames = list(benchmark.metrics)\n metricvalues = [METRICS[name] for name in metricnames] # Metric variable for curl, to avoid hash lookup for every metric name\n results = [list() for x in xrange(0, len(metricnames))] # Initialize arrays to store results for each metric\n\n curl.setopt(pycurl.WRITEFUNCTION, lambda x: None) #Do not store actual response body at all.\n\n #Benchmark warm-up to allow for caching, JIT compiling, on client\n logging.info('Warmup: ' + message + ' started')\n for x in xrange(0, warmup_runs):\n if benchmark.method == u'POST' or benchmark.method == u'PUT':\n curl.setopt(curl.READFUNCTION, StringIO.StringIO(benchmark.body).read)\n curl.perform()\n logging.info('Warmup: ' + message + ' finished')\n\n logging.info('Benchmark: ' + message + ' starting')\n\n for x in xrange(0, benchmark_runs): # Run the actual benchmarks\n if benchmark.method == u'POST' or benchmark.method == u'PUT':\n curl.setopt(curl.READFUNCTION, StringIO.StringIO(benchmark.body).read)\n\n try: # Run the curl call, if it errors, then add to failure counts for benchmark\n curl.perform()\n except Exception:\n output.failures = output.failures + 1\n continue # Skip metrics collection\n\n # Get all metrics values for this run, and store to metric lists\n for i in xrange(0, len(metricnames)):\n results[i].append( curl.getinfo(metricvalues[i]) )\n\n logging.info('Benchmark: ' + message + ' ending')\n\n temp_results = dict()\n for i in xrange(0, len(metricnames)):\n temp_results[metricnames[i]] = results[i]\n output.results = temp_results\n\n curl.close()\n return analyze_benchmark_results(output, benchmark)", "def tickPerf(self):\n return self._tickPerf", "def Run(benchmark_spec: bm_spec.BenchmarkSpec) -> List[sample.Sample]:\n discovery_duration = benchmark_spec.data_discovery_service.DiscoverData()\n return [\n sample.Sample('data_discovery_duration', discovery_duration, 'seconds',\n benchmark_spec.data_discovery_service.GetMetadata())]", "def on_get(self, req: Request, resp: Response):\n benchmarks = self.storage.fetch_benchmark({})\n\n response = build_benchmarks_response(benchmarks)\n resp.text = json.dumps(response)", "def get_benchmark(self, benchmark):\n\t\tif not isinstance(benchmark, str) and not callable(benchmark): return benchmark\n\t\telif benchmark in self.classes:\treturn self.classes[benchmark]()\n\t\traise TypeError('Passed benchmark is not defined!')", "def _run_benchmark(self, params):\n logging.info('Running benchmark [%s]', self._get_name())\n params = benchmark_cnn.setup(params)\n bench = benchmark_cnn.BenchmarkCNN(params)\n bench.print_info()\n stats = bench.run()\n extras = {}\n extras['examples_per_sec'] = stats.get('images_per_sec')\n if 'last_average_loss' in stats:\n extras['last_average_loss'] = stats['last_average_loss']\n if 'top_1_accuracy' in stats:\n extras['top_1_accuracy'] = stats['top_1_accuracy']\n if 'top_5_accuracy' in stats:\n extras['top_5_accuracy'] = stats['top_5_accuracy']\n self.report_benchmark(\n iters=stats.get('num_steps'),\n wall_time=stats.get('average_wall_time'),\n extras=extras)", "def get_benchmark(client):\n r = client.get(config.API_PATH() + '/benchmarks')\n benchmarks = 
json.loads(r.data)\n return benchmarks['benchmarks'][0]['id']", "def runtime(self):\n return self.stop_time - self.start_time", "def get_benchmark_result(cmd):\n print 'running benchmark with command:'\n print cmd\n p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT)\n out, err = p.communicate() # We can only load 1 url on Chrome at a time\n print out\n return out, err, p.returncode", "def t(self):\n return self._data_writer.get_current_run_time_ms()", "def __call__(self, param):\n count = param.nbatch\n if self.last_count > count:\n self.init = False\n self.last_count = count\n\n if self.init:\n if count % self.frequent == 0:\n speed = self.frequent * self.batch_size / (time.time() - self.tic)\n s = ''\n if param.eval_metric is not None:\n name, value = param.eval_metric.get()\n s = \"Epoch[%d] Batch [%d]\\tSpeed: %.2f samples/sec\\tTrain-\" % (param.epoch, count, speed)\n for n, v in zip(name, value):\n s += \"%s=%f,\\t\" % (n, v)\n else:\n s = \"Iter[%d] Batch [%d]\\tSpeed: %.2f samples/sec\" % (param.epoch, count, speed)\n\n logging.info(s)\n print(s)\n self.tic = time.time()\n else:\n self.init = True\n self.tic = time.time()", "def get_post_stats(self):\n stats = self.stats\n stats.results = self.job.result().get_counts(stats.iteration)\n stats.datetime = str(datetime.now())", "def __call__(self, epoch):\n # Compute the new dynamic learning rate, log in onto TensorBoard and\n # return the result for the training process\n learning_rate = self.schedule(epoch)\n tf.summary.scalar('learning rate', data=learning_rate, step=epoch)\n return learning_rate", "def get_time_elapsed(self, pipeline_run):\n begin = pipeline_run[\"steps\"][0][\"method_calls\"][0][\"start\"]\n begin_val = parse(begin)\n end = pipeline_run[\"steps\"][-1][\"method_calls\"][-1][\"end\"]\n end_val = parse(end)\n total_time = (end_val - begin_val).total_seconds()\n return total_time", "def next_run_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"next_run_time\")", "def next_run_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"next_run_time\")", "def get_perf(self) :\n self.train()\n self.df_true = self.df_true[self.length:]\n self.accuracy , self.recall, self.specificity, self.profit, self.min , self.max = get_accuracy_LSTM(self.df_test, self.df_true,self.model, self.length)", "def evaluate_benchmarks(self):\n\n # iterate over replicates\n results = {}\n for replicate_id, replicate in self.replicates:\n\n # evaluate benchmark for current replicate\n bmark = SimulationBenchmark(replicate.copy(),\n graph=self.graphs[replicate_id],\n **self.params)\n\n # store results\n results[replicate_id] = dict(\n\n labels_MAE=bmark.scores['labels'].MAE,\n level_only_MAE=bmark.scores['level_only'].MAE,\n spatial_only_MAE=bmark.scores['spatial_only'].MAE,\n community_MAE=bmark.scores['labels_comm'].MAE,\n\n labels_PCT=bmark.scores['labels'].percent_correct,\n level_only_PCT=bmark.scores['level_only'].percent_correct,\n spatial_only_PCT=bmark.scores['spatial_only'].percent_correct,\n community_PCT=bmark.scores['labels_comm'].percent_correct)\n\n # compile dataframe\n results = pd.DataFrame.from_dict(results, orient='index')\n results.index.set_names(self.multiindex, inplace=True)\n\n return results", "def performance_measure(self, x):\n # \"calculate performance measure\" \n pref = x.evaluate()\n return pref", "def get_latest_benchmark():\n\n benchmark_paths = glob.glob(\"./.benchmarks/*/*.json\")\n dates = [\n \"\".join(_b.split(\"/\")[-1].split(\"_\")[2:4]) for _b in benchmark_paths\n ]\n 
benchmarks = {date: value for date, value in zip(dates, benchmark_paths)}\n\n dates.sort()\n latest = dates[-1]\n benchmark_latest = benchmarks[latest]\n\n return benchmark_latest", "def __call__(self, param):\n count = param.nbatch\n if self.last_count > count:\n self.init = False\n self.last_count = count\n\n if self.init:\n if count % self.frequent == 0:\n # #11504\n try:\n speed = self.frequent * self.batch_size / (time.time() - self.tic)\n except ZeroDivisionError:\n speed = float('inf')\n if param.eval_metric is not None:\n name_value = param.eval_metric.get_name_value()\n if self.auto_reset:\n param.eval_metric.reset()\n msg = 'Epoch[%d] Batch [%d-%d]\\tSpeed: %.2f samples/sec'\n msg += '\\t%s=%f'*len(name_value)\n logging.info(msg, param.epoch, count-self.frequent, count, speed, *sum(name_value, ()))\n else:\n msg = 'Epoch[%d] Batch [0-%d]\\tSpeed: %.2f samples/sec'\n msg += '\\t%s=%f'*len(name_value)\n logging.info(msg, param.epoch, count, speed, *sum(name_value, ()))\n else:\n logging.info(\"Iter[%d] Batch [%d]\\tSpeed: %.2f samples/sec\",\n param.epoch, count, speed)\n self.tic = time.time()\n else:\n self.init = True\n self.tic = time.time()", "def get_speedtest():\n\n if(DEBUG):\n print(\"Gathering speedtest results...\", flush=True)\n\n s = Speedtest()\n s.get_best_server()\n s.download()\n \n return s.results.dict()", "def cpu_time(self):", "def get_perf(self) :\n self.train()\n\n prediction = self.clf.predict(self.df_test.drop(columns = 'up')[:-1])\n self.accuracy = accuracy_score(df_test['up'][length:].values, prediction)\n tn, fp, fn, tp = confusion_matrix(df_test['up'][length:].values, prediction).ravel()\n self.recall = tp/(tp+fn)\n self.specificity = tn / (tn+fp)\n\n\n self.df_true = self.df_true[self.length:]\n\n profit = 1\n mini = 1\n maxi = 1\n self.df_true['close'] = self.df_true['close'].map(lambda x : np.exp(x))\n for s in range(1,len(self.df_true)):\n if prediction[x-1] == 1 :\n result = ((self.df_true['close'].iloc[s] -self.df_true['close'].iloc[s-1]) / self.df_true['close'].iloc[s-1]) + 1\n profit = profit * result\n if result < mini :\n mini = result\n if maxi < result :\n maxi = result\n self.mini = mini\n self.maxi = maxi\n self.profit = profit", "def fetch_last(self, type):\n cursor = self.conn.cursor()\n sql = 'SELECT {} FROM speedlogs ORDER BY measure_dt DESC LIMIT 1'.format(type)\n cursor.execute(sql)\n return cursor.fetchone()", "def _run_time(func):\n start_time = datetime.datetime.now()\n func\n end_time = datetime.datetime.now()\n return end_time - start_time", "def runtime(self):\n return (self.time - self.start).total_seconds()", "def get(self):\n if self.running:\n return self.accumulated_time + pg.time.get_ticks() - self.start_time\n else:\n return self.accumulated_time", "def get_performance(self):\n if self.skip_reference:\n return self.compare_sim.tps\n\n # Avoid divide by zero errors when the simulation is not executed.\n if self.reference_sim.tps == 0:\n return 0\n\n t0 = 1 / self.reference_sim.tps\n t1 = 1 / self.compare_sim.tps\n return 1 / (t1 - t0)", "def bench_report(t1, t2):\n print \"\\n\\n Time taken: {0}\".format(t2 - t1)", "def get_latency(self):\n raise NotImplementedError()", "def pc_work_time_total(self) -> \"float\":\n return _beamforming_swig.randomsampler_sptr_pc_work_time_total(self)", "def RunBenchmark(path_to_apk, run_label):\n # `path_to_apk` is similar to `./out/59.0.3071.132_arm_MonochromeStable.apk`\n chrome_version = ChromeVersion(path_to_apk.split('/')[-1].split('_')[0])\n subprocess.call(['adb', 'install', '-r', 
'-d', path_to_apk])\n subprocess.call([os.path.join(utils.CHROMIUM_SRC, 'tools',\n 'perf', 'run_benchmark'),\n '--browser=android-system-chrome',\n '--pageset-repeat=1', # could remove this later\n '--results-label=%s' % str(chrome_version),\n # TODO(wangge):not sure if we should run in compatibility\n # mode even for the later version, probably add a check in\n # caller to determine if we should run it in compatibility\n # mode and add an argument `run_in_compatibility_mode` to\n # the `RunBenchmark` function\n '--compatibility-mode=no-field-trials',\n '--compatibility-mode=ignore-certificate-errors',\n '--compatibility-mode=legacy-command-line-path',\n '--compatibility-mode=gpu-benchmarking-fallbacks',\n '--story-filter=wikipedia', # could remove this\n # thinking of adding an argument to the tool to set this\n '--output-dir=%s' % os.path.join(\n utils.APP_ROOT, 'results', run_label,\n str(chrome_version.milestone)),\n # thinking of adding an argument to the tool to set this too\n 'system_health.memory_mobile'])", "def get_runs_to_average(self):\n\n if Test.performance_params: return int(Test.performance_params[1])\n elif self._check_performance: return self._runs_to_average\n else: return None", "def _run():\n subprocess.check_call(\n [\n \"tools/bazel\",\n \"build\",\n \"-c\",\n \"opt\",\n \"test/core/memory_usage/memory_usage_test\",\n ]\n )\n ret = {}\n for name, benchmark_args in _BENCHMARKS.items():\n for scenario, extra_args in _SCENARIOS.items():\n # TODO(chenancy) Remove when minstack is implemented for channel\n if name == \"channel\" and scenario == \"minstack\":\n continue\n try:\n output = subprocess.check_output(\n [\n \"bazel-bin/test/core/memory_usage/memory_usage_test\",\n ]\n + benchmark_args\n + extra_args\n )\n except subprocess.CalledProcessError as e:\n print(\"Error running benchmark:\", e)\n continue\n for line in output.splitlines():\n for key, (pattern, conversion) in _INTERESTING.items():\n m = re.match(pattern, line)\n if m:\n ret[scenario + \": \" + key] = conversion(m.group(1))\n return ret", "def run_times(self):\n if self.__hasrun:\n return self._run_times\n else:\n raise ValueError(\"Cannot report unmeasured times.\")", "def pc_work_time_avg(self):\n return _add_vector_swig.add_vector_2_cpp_sptr_pc_work_time_avg(self)", "def measure(self, lastMeasure=None, m=None):\n if m is None:\n m = {}\n m['_time'] = time.time()\n if lastMeasure is not None:\n m['_stepDuration'] = time.time() - lastMeasure['_time']\n else:\n m['_stepDuration'] = time.time() - self._start_t\n self._msr(m)\n return m", "def timings(self):\r\n return self._timings", "def get_last_sample(self) -> InternalSample:", "def get_measured_current(self):\n status = self.get_status_response()\n current = status[16] + (status[17] * 0x100) + (status[18] * 0x10000) + (status[19] * 0x1000000)\n current = float(current)\n current /= (1000.0 * 1000.0)\n return current\n #end get_measured_current", "def _get_running_time(self):\n time_sum = 0.0\n for subdir in os.listdir(self.path):\n if subdir.startswith('.'):\n continue\n try:\n line = open('{0}/{1}/{2}/out/OUTDOCK'.format(self.path, subdir, DOCKING_RUN_FILES),'r').readlines()[-1]\n if line.startswith('elapsed time'):\n time = float(line.split()[-1])\n time_sum = time_sum + time\n except:\n pass \n self.running_time = time_sum", "def count_benchmarks():\n return len(setup_storage().fetch_benchmark({}))", "def get_run_time(testname):\n logname = testname + \".log\"\n cmdline = \"\"\"awk '/STKPERF: Total Time/ { print $4; }' %s \"\"\"%(\n logname)\n 
try:\n pp = subprocess.run(cmdline, shell=True, check=True, capture_output=True)\n return pp.stdout.decode('UTF-8').strip()\n except:\n return \"\"", "def get_last(self):\n self.accumulated_time_last = pg.time.get_ticks() - self.start_time_last\n return self.accumulated_time_last", "def _timed_execute(self):\n tstart = time.perf_counter()\n self._func(*self._func_args, **self._func_kwargs)\n tend = time.perf_counter() \n\n tdelta = tend - tstart\n\n return tdelta", "def pc_work_time(self) -> \"float\":\n return _beamforming_swig.randomsampler_sptr_pc_work_time(self)", "def final_eval(self):\n # Test performance - Load best model\n self.load_model(self.best_model_path, model_type='best')\n logging.info(\"Loading best model after epoch: %d\" %\n self.train_info['epoch'])\n\n perf_file = path.join(self.model_dir, \"perf.json\")\n if self.slurm_id:\n parent_dir = path.dirname(path.normpath(self.model_dir))\n perf_dir = path.join(parent_dir, \"perf\")\n if not path.exists(perf_dir):\n os.makedirs(perf_dir)\n perf_file = path.join(perf_dir, self.slurm_id + \".json\")\n\n output_dict = {'model_dir': self.model_dir}\n for key, val in vars(self.args).items():\n output_dict[key] = val\n\n for split in ['dev', 'test']:\n logging.info('\\n')\n logging.info('%s' % split.capitalize())\n result_dict = self.eval_model(split, final_eval=True)\n if split != 'test':\n logging.info('Calculated F1: %.3f' % result_dict['fscore'])\n\n output_dict[split] = result_dict\n\n json.dump(output_dict, open(perf_file, 'w'), indent=2)\n\n logging.info(\"Final performance summary at %s\" % perf_file)\n sys.stdout.flush()", "def findBenchFromDevice(self, device):\n return device.bench", "def reference_benchmark_result(self):\n return self._reference_id", "def calc_stats(results):\r\n all_res = []\r\n count = 0\r\n for values in results.status_code_counter.values():\r\n all_res += values\r\n count += len(values)\r\n\r\n cum_time = sum(all_res)\r\n\r\n if cum_time == 0 or len(all_res) == 0:\r\n rps = avg = min_ = max_ = amp = 0\r\n else:\r\n if results.total_time == 0:\r\n rps = 0\r\n else:\r\n rps = len(all_res) / float(results.total_time)\r\n avg = sum(all_res) / len(all_res)\r\n max_ = max(all_res)\r\n min_ = min(all_res)\r\n amp = max(all_res) - min(all_res)\r\n stdev = math.sqrt(sum((x-avg)**2 for x in all_res) / count)\r\n\r\n return (\r\n RunStats(count, results.total_time, rps, avg, min_, max_, amp, stdev)\r\n )", "def speed_test(self):\n self.lg.debug('Performing speed test no. 
{}'.format(self.runs))\n self.st.get_best_server()\n self.st.upload()\n self.st.download()\n up = self.st.results.upload // 1e6\n down = self.st.results.download // 1e6\n timestamp = time.localtime(time.time())\n self.lg.debug('Timestamp: {}'.format(\n time.strftime('%H:%M:%S', timestamp)))\n self.lg.debug(\n 'Upload is {} Mbps'.format(up))\n self.lg.debug(\n 'Download is {} Mbps'.format(down))\n self.results_up.append(up)\n self.results_down.append(down)\n self.results_timestamp.append(timestamp)", "def pc_work_time_avg(self):\n return _TestA_swig.my_qpsk_demod_cb_sptr_pc_work_time_avg(self)", "def collect_perf_data(tracker: cm.RateTracker):\n pd = PerfData()\n\n pd.compile_time = _get_optional_counter(KEY_COMPILE_TIME) * 1e-3\n pd.programming_time = _get_optional_counter(KEY_PROGRAMMING_TIME) * 1e-9\n pd.est_samples_per_sec = _get_optional_counter(KEY_SYSTEM_PERF)\n\n pd.total_samples = tracker._partial_count + tracker._count\n pd.samples_per_sec = tracker.global_rate()\n if pd.samples_per_sec > 0:\n pd.total_time = float(pd.total_samples) / pd.samples_per_sec\n else:\n pd.total_time = 0.0\n\n return pd", "def pc_work_time_avg(self):\n return _TestA_swig.cleanslate_sptr_pc_work_time_avg(self)", "def measure(func):\n if func not in measured_funcs:\n measured_funcs.add(func)\n if not hasattr(func, 'total_runtime'):\n func.total_runtime = 0.0\n if not hasattr(func, 'total_calls'):\n func.total_calls = 0\n\n def wrapper(*args, **kwargs):\n before_call = datetime.datetime.now()\n res = func(*args, **kwargs)\n elapsed = datetime.datetime.now() - before_call\n func.total_runtime += elapsed.total_seconds()\n func.total_calls += 1\n return res\n\n return wrapper", "def get_last_run(self):\n\n outdir = self.testTopDir + self.suiteName + \"-tests/\"\n\n # this will work through 2099\n if os.path.isdir(outdir):\n dirs = [d for d in os.listdir(outdir) if (os.path.isdir(outdir + d) and\n d.startswith(\"20\"))]\n dirs.sort()\n\n return dirs[-1]\n else:\n return None", "def after_run(self):\n # Calculate the performance of the strategy and portfolio\n self.portfolio.calc_stats()\n self.calc_performance()\n\n return self", "def measure(self):\n pass", "def run(batch_size):\n start = time.time()\n \n model = get_model()\n history = train(model, batch_size)\n\n y_true, y_pred = predict(model)\n precision, recall, fscore, acc_score = get_metrics(y_true, y_pred)\n tn, fp, fn, tp = get_confusion_matrix(y_true, y_pred)\n\n acc = history.history['acc']\n val_acc = history.history['val_acc']\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n\n # Write your own logging.\n # log(history, start, precision, recall, fscore, acc_score, tn, fp, fn, tp)\n min_val_loss = min(history.history['val_loss'])\n\n now = datetime.now()\n id = now.strftime('%Y%m%d%H%M%S')\n elapsed_s = time.time() - start\n elapsed = time.strftime('%H:%M:%S', time.gmtime(elapsed_s))\n\n print(elapsed)\n\n print('Accuracy: ', acc_score)\n print('Precision: ', precision)\n print('Recall: ', recall)\n print('F-score: ', fscore)\n\n print('TN: ', tn)\n print('FP: ', fp)\n print('FN: ', fn)\n print('TP: ', tp)\n\n return min_val_loss, precision, recall, fscore, acc, val_acc, loss, val_loss, id", "def profile(self):\n return NumericStatsMixin.profile(self)", "def run_epoch(self, sample_batch):\n start = time.time()\n loss = super().run_epoch(sample_batch)\n end = time.time()\n duration = end - start\n\n self.iteration += 1\n\n self.logger.info(\"Iteration: {}, Loss: {}, Time: {}\".format(self.iteration,\n loss,\n duration))\n\n 
return loss", "def get_chartdata():\n callback = bottle.request.query.get('callback')\n y_axis = bottle.request.query.get('y_axis').strip()\n w_acts = [\"action='%s'\" % act for act in bottle.request.query.get('actions').strip().split(',')]\n w_acts = 'AND (%s)' % ' OR '.join(w_acts) if w_acts else ''\n f_value = 'AVG(latency)' if y_axis.startswith('avg') else 'COUNT(timestamp)'\n atomic = 1 if y_axis in ['aops', 'avgl'] else 0\n\n db_conn = tools.get_db_conn('%s.db' % bottle.request.query.test_run_id)\n sql = 'SELECT test_run_status, timestamp_started, timestamp_completed FROM info LIMIT 1'\n status, started, finished = tools.db_query(db_conn, sql)[1][0]\n progress = int(float(finished) - float(started)) if finished \\\n else int(tools.get_timestamp() - float(started))\n\n sql = 'SELECT substr(timestamp, 0, 11), code, %s FROM recs ' % f_value + \\\n 'WHERE atomic=%s %s GROUP BY code, substr(timestamp, 0, 11) ' % (atomic, w_acts) + \\\n 'ORDER BY id DESC LIMIT 3600' # last 1 hour activity\n\n result = tools.db_query(db_conn, sql)[1] if finished else tools.db_query(db_conn, sql)[1][:-1]\n result = list(reversed(result))\n results = {str(abs(int(item[0]) - int(float(started)))):\n {'failed': 0, 'passed': 0, 'incomplete': 0} for item in result}\n for item in result: # item[0] - timestamp, item[1] - code (None if incomplete), item[2] - value\n timestamp = str(int(item[0]) - int(float(started)))\n value = item[2] or 0\n results[timestamp]['failed'] += value if item[1] and item[1] != 200 else 0\n results[timestamp]['passed'] += value if item[1] == 200 else 0\n results[timestamp]['incomplete'] += value if item[1] == None else 0\n results = [{'timestamp': key, 'failed': value['failed'], 'passed': value['passed'],\n 'incomplete': value['incomplete']} for key, value in results.items()]\n result = {bottle.request.query.slave: results, 'status': status,\n 'started': started, 'finished': finished or '(not finished)', 'progress': progress}\n return '{0}({1})'.format(callback, result)", "def test_runner_full_loop(caplog, dataset):\n caplog.set_level(logging.DEBUG)\n\n session = dataset\n\n start_date = datetime.datetime(2020, 5, 17, 13, 0, 0)\n end_date = datetime.datetime(2020, 5, 17, 13, 0, 5)\n replay_rate = 1 \n \n db_connector_test = DataBaseConnector(session=session, \n table_name='timeseries_dataset', \n time_column='timestamp', \n start_date=start_date,\n end_date=end_date)\n\n test_publisher = ConsolePublisher()\n\n runner = CentralRunner(db_connection=db_connector_test, \n output_system=test_publisher, \n start_time=start_date, \n end_time=end_date,\n replay_rate=replay_rate )\n\n start = time.perf_counter()\n \n runner.run()\n\n end = time.perf_counter()\n\n code_time = end - start\n \n print(code_time)\n \n assert int(code_time) == 4", "def run_benchmark(take_geo_mean, num_runs, bench_func, *args):\n #if options.profile:\n # import cProfile\n # prof = cProfile.Profile()\n # prof.runcall(bench_func, num_runs, *args)\n # prof.print_stats(sort=options.profile_sort)\n #else:\n data = bench_func(num_runs, *args)\n if take_geo_mean:\n product=1\n _total=0\n for _x in data:\n _total+=_x\n product *= _x\n _geo_mean=math.pow(product, 1.0 / len(data))\n return \"Runs: %d, Total Time:%5.3f, Geo Mean:%6.4f\" % (len(data), _total, _geo_mean)\n else:\n for x in data:\n print(x)", "def __benchmark(self, clf):\n print('=' * 80)\n print('Training: ')\n print(clf)\n train_start = time()\n clf.fit(self.X_train, self.Y_train)\n train_time = time() - train_start\n print(\"The training time was: %0.3fs\" % 
train_time)\n\n test_start = time()\n pred = clf.predict(self.X_test)\n test_time = time() - test_start\n print(\"The test time was: %0.3fs\" % test_time)\n\n score = metrics.accuracy_score(self.Y_test, pred)\n print(\"accuracy: %0.3f\" % score)\n\n return score", "def benchmark(self, f, name, publish=True, **kwargs):\n (\n tags,\n optional_benchmark_info,\n context,\n info,\n github,\n options,\n cluster_info,\n _,\n ) = self._init(kwargs)\n self.set_python_info_and_context(info, context)\n\n timing_options = self._get_timing_options(options)\n iterations = timing_options.pop(\"iterations\")\n if iterations < 1:\n raise ValueError(f\"Invalid iterations: {iterations}\")\n\n try:\n data, output = self._get_timing(f, iterations, timing_options)\n # It's hard to read what this next function call really does. It\n # does _not_ publish, but I think it creates a specific data\n # structure. Should this be in the exception handler? Within\n # self._get_timing() above we run user-given code, so that is\n # expected to raise exceptions, and wants to be handled. But which\n # exceptions is self.record() expected to raise especially when\n # _not_ doing HTTP interaction? And why do we handle those\n # exceptions in the same way as those exceptions that are raised by\n # user-given code?\n benchmark, _ = self.record(\n {\"data\": data, \"unit\": \"s\"},\n name,\n tags=tags,\n optional_benchmark_info=optional_benchmark_info,\n context=context,\n info=info,\n github=github,\n options=options,\n cluster_info=cluster_info,\n publish=False,\n )\n except Exception as exc:\n error = {\"stack_trace\": traceback.format_exc()}\n benchmark, _ = self.record(\n None,\n name,\n tags=tags,\n optional_benchmark_info=optional_benchmark_info,\n context=context,\n info=info,\n github=github,\n options=options,\n cluster_info=cluster_info,\n error=error,\n publish=False,\n )\n raise exc\n finally:\n if publish:\n # It's a bit unclear -- is `benchmark` defined in _all_ cases\n # when we arrive here?\n # https://pylint.readthedocs.io/en/latest/user_guide/messages/error/used-before-assignment.html\n self.publish(benchmark) # pylint: disable=used-before-assignment\n return benchmark, output", "def _after_run_finished(self):\n\n # append lists for this run\n self._mutual_info.append([])\n self._baseline_mutual_info.append([])\n\n self._classifier_accuracy.append([])\n self._baseline_classifier_accuracy.append([])\n\n self._steps.append([])\n\n self._average_boosting_dur.append([])\n self._average_delta.append([])\n self._different_steps.append([])\n\n # get all the measurements from the last run\n last_run_measurements = self._measurement_manager.run_measurements[-1]\n\n # temporary sliding window for computation of one value of the mutual information\n labels_window = []\n outputs_window = []\n baseline_outputs_window = []\n\n print('computing statistics after run...')\n\n # go step-by-step through the last run (single_measurement contains all the values taken in that time-step)\n for single_measurement in last_run_measurements:\n\n # these two measurements have to run with different (lower) frequency\n if 'average_boosting_dur' in single_measurement.keys():\n self._average_boosting_dur[-1].append(single_measurement['average_boosting_dur'])\n self._average_delta[-1].append(single_measurement['average_delta'])\n self._different_steps[-1].append(single_measurement['current_step'])\n\n # pick \"dataset_labels\" (see the init()) from the single_measurement and append one value to the separate list\n 
labels_window.append(single_measurement['dataset_labels'])\n outputs_window.append(single_measurement['model_outputs'])\n baseline_outputs_window.append(single_measurement['baseline_outputs'])\n\n # wait until the window has enough values\n if len(labels_window) < self._evaluation_period:\n continue\n\n # compute stats in the window and store to the last run (that's the [-1]) at the end (that's the append)\n self._mutual_info[-1].append(\n compute_mutual_information(\n np.array(labels_window),\n np.array(outputs_window),\n self._num_classes,\n data_contains_id=True)\n )\n\n if self._debug_mi:\n self._debug_mutual_info(np.array(labels_window), np.array(outputs_window), self._mutual_info[-1][-1])\n\n self._baseline_mutual_info[-1].append(\n compute_mutual_information(\n np.array(labels_window),\n np.array(baseline_outputs_window),\n self._num_classes,\n data_contains_id=True)\n )\n\n # compute the classifier accuracies (for model and baseline)\n dev = self._topology_adapter.get_device()\n output_dim = self._topology_adapter.get_model_output_size()\n\n labels_tensor = torch.tensor(labels_window, dtype=torch.long, device=dev)\n outputs_tensor = torch.tensor(outputs_window, dtype=torch.long, device=dev)\n baseline_outputs_tensor = torch.tensor(baseline_outputs_window, dtype=torch.long, device=dev)\n\n acc = self._compute_classifier_acc(outputs_tensor, labels_tensor, output_dim)\n self._classifier_accuracy[-1].append(acc)\n\n baseline_acc = self._compute_classifier_acc(baseline_outputs_tensor, labels_tensor, output_dim)\n self._baseline_classifier_accuracy[-1].append(baseline_acc)\n\n # store also step (for the x-axis)\n self._steps[-1].append(single_measurement['current_step'])\n\n # remove the self._sliding_window_stride items from the sliding windows.. 
(then fill the same amount..)\n for i in range(0, self._sliding_window_stride):\n if len(labels_window) > 0:\n labels_window.pop(0)\n outputs_window.pop(0)\n baseline_outputs_window.pop(0)", "def _get_average_run_time(cls, suite_model):\n counter = 0\n execution_time = 0.0\n suite_documents = suite_model.get_last_five()\n for suite_document in suite_documents:\n counter += 1\n start_date = dateutil.parser.parse(suite_document['start_date'])\n end_date = dateutil.parser.parse(suite_document['end_date'])\n time_taken = end_date - start_date\n execution_time += time_taken.seconds\n if counter == 0:\n return \"30 Minutes\"\n minutes = math.floor(execution_time / 60)\n seconds = int(execution_time - (minutes * 60))\n return \"{} Minutes {} Seconds\".format(minutes, seconds)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def time_stat(self, stat=\"mean\"):\n\n # create cdo command and run it\n cdo_command = f\"cdo -tim{stat}\"\n run_this(cdo_command, self, output=\"ensemble\")", "def benchmark(func):\n start = time.time()\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n rc = func(*args, **kwargs)\n print('Running time: {}'.format(time.time() - start))\n return rc\n return wrapper", "def evaluate_and_log (self, config, budget):\n\n start = time.time()\n res = self.compute(config, budget=budget)\n end = time.time()\n\n\n id = (len(self.run_data), 0,0)\n\n # construct a Datum object to mimic the internals of a HpBandSter iteration\n res_dict = {budget: {'loss': res['loss'], 'info': res['info']}}\n ts_dict = {budget: {'submitted': start, 'started': start, 'finished': end}}\n self.run_data[id] = Datum(config, {}, results=res_dict, budget=budget, time_stamps = ts_dict, status='FINISHED')\n\n return(res[\"loss\"])", "def step(self):\n fit_default_config = {\"verbose\": self.verbose}\n fit_default_config.update(self.config.get(\"fit_config\", {}))\n\n history = self.model.fit(self.train_dataset, **fit_default_config)\n if history is None:\n stats = {}\n else:\n stats = {\"train_\" + k: v[-1] for k, v in history.history.items()}\n\n self.epoch += 1\n return stats", "def get_curr_exec_time(self):\n if self.type == 'normal':\n try:\n self.curr_exec_time = self.my_rand.gauss(self.runtime, self.stddev)\n except:\n if self.fwk.debug:\n print(\"not varying the execution time\")\n self.curr_exec_time = self.runtime\n raise\n self.start_exec_time = self.fwk.fwk_global_time\n self.state = \"running\"\n elif self.type == 'sandia_work':\n # this is a sandia style work task\n next_ckpt = self.sim.next_ckpt # relative work time\n work_todo = self.sim.total_work - self.sim.completed_work\n self.curr_exec_time = min(work_todo, next_ckpt)\n self.start_exec_time = self.fwk.fwk_global_time\n self.state = \"running\"\n elif self.type == 'sandia_rework':\n next_ckpt = self.sim.next_ckpt # relative work time\n self.curr_exec_time = min(self.sim.rework_todo, next_ckpt)\n self.start_exec_time = self.fwk.fwk_global_time\n self.state = \"running\"\n elif self.type == 'sandia_ckpt' or self.type == 'sandia_restart':\n self.curr_exec_time = self.runtime\n self.start_exec_time = self.fwk.fwk_global_time\n self.state = \"running\"\n else:\n print('error error error!!! 
problem with component type in get_curr_exec_time')\n raise", "def std_run_time(self) -> float:\n return float(self.result_array.sum(axis=0).std())", "def getTime():\n\n return float(time.perf_counter()*1000)" ]
[ "0.72080225", "0.6449681", "0.6247628", "0.6000796", "0.5958153", "0.5957309", "0.59554595", "0.58821696", "0.5833112", "0.58005464", "0.5773313", "0.574518", "0.57411474", "0.57227314", "0.5712661", "0.56783533", "0.56588525", "0.5634061", "0.55844593", "0.5574638", "0.55643535", "0.55509484", "0.5529781", "0.54945374", "0.54851186", "0.5458872", "0.5455796", "0.5454589", "0.54156625", "0.5412742", "0.53759843", "0.5367371", "0.53616047", "0.53616047", "0.5346787", "0.53426635", "0.53391004", "0.5308637", "0.53054106", "0.5297594", "0.5295674", "0.52921695", "0.52899086", "0.5282529", "0.5277746", "0.5275194", "0.52683246", "0.52641475", "0.5242224", "0.5240646", "0.5238138", "0.523697", "0.52368903", "0.5233951", "0.5228645", "0.5222364", "0.52220345", "0.5213305", "0.5206193", "0.52018636", "0.5196924", "0.51922476", "0.518772", "0.51876193", "0.51821864", "0.5181288", "0.5180799", "0.5178575", "0.5163908", "0.51585114", "0.5150649", "0.5150041", "0.5149934", "0.51494926", "0.5140637", "0.5140091", "0.5139954", "0.51321214", "0.5108757", "0.510479", "0.51008135", "0.5097119", "0.5096106", "0.5091215", "0.5090037", "0.50850594", "0.5075272", "0.5071199", "0.5071199", "0.5071199", "0.5071199", "0.5071199", "0.5071199", "0.50710934", "0.5070726", "0.506956", "0.50685906", "0.50642467", "0.50633055", "0.50597453" ]
0.5574966
19
Run the benchmark for the given number of steps.
def run(self, steps): self.sim.run(steps)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self, steps = 1000):\n for step in range(steps):\n if self.is_done():\n return\n self.step()", "def run(self, steps=1000):\n for step in range(steps):\n if self.is_done():\n return\n self.step()", "def run(self, steps=1000):\n for step in range(steps):\n if self.is_done():\n return\n self.step()", "def run(self, step:int=0):\n if step > 0:\n _range = range(self.current_step, self.current_step + step + 1)\n else: # run forever\n _range = itertools.count(self.current_step)\n for step_num in _range:\n self.step()", "def run_one(num):\n start = time.time()\n if not config.get('radosbench'):\n benchcontext = {}\n else:\n benchcontext = copy.copy(config.get('radosbench'))\n iterations = 0\n while time.time() - start < int(config.get('time', 600)):\n log.info(\"Starting iteration %s of segment %s\"%(iterations, num))\n benchcontext['pool'] = str(num) + \"-\" + str(iterations)\n with radosbench.task(ctx, benchcontext):\n time.sleep()\n iterations += 1", "def run_trials(self, num=0):\n if num == 'all':\n self.trials_to_run = len(self.trials)\n else:\n self.trials_to_run = num\n self.vision_egg.go()", "def run(num_trials):\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.1, display=True) \n # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=num_trials) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n a.performace_report(num_trials)", "def perform_mult_steps(self, num_steps, tc, batch_size):\n\n self.sample_v()\n self.cli.empty().print(self.titlestr % (\"k\", \"objective\", *[f\"lm_{i} x constr_{i}\" for i in range(len(self.lagrange_mults))]))\n\n # perform a min step\n for s in range(num_steps):\n batch = tc.sample(batch_size, ['add_act', 'grad_ranking', 'add_pi'])\n self.perform_step(s, batch)", "def lab_run_big(character_id, time_step):\n pass", "def simulate(self, n, dt=None):\n for _ in range(n):\n self.step(dt)", "def setNumIterations(*argv):", "def execute_timesteps(self, num_timesteps, max_timesteps_per_episode=0, update_spec=None, deterministic=False):\n pass", "def run_circuit_and_measure(\n self, circuit: Circuit, n_samples: Optional[int] = None, **kwargs\n ) -> Measurements:\n self.number_of_circuits_run += 1\n self.number_of_jobs_run += 1", "def run(self, steps):\n if not self.skip_reference:\n self.reference_sim.run(steps)\n self.compare_sim.run(steps)", "def run_trials(environ, total):\n run_times = []\n\n for i in range(0, total):\n environ.run()\n run_times.append(environ.total_time)\n\n return run_times", "def setNumTimeSubSteps(*argv):", "def main(num_trials, num_actions):\n\tfor i in xrange(int(num_trials)):\n\t\ttrial(i+1, int(num_actions))", "def run_verbose(self, steps = 10):\n for step in range(steps):\n if self.is_done():\n print 'Done, stopping.'\n print self.to_string()\n return\n print self.to_string()\n self.step()", "def steps(self,num_steps):\n if self.last_sensation == TERMINAL_STATE:\n self.start_episode()\n for step in range(num_steps):\n next_sensation,reward = self.env(self.next_action)\n self.collect_data(self.last_sensation, 
self.next_action, reward, next_sensation)\n self.next_action = self.agent(next_sensation,reward)\n self.last_sensation = next_sensation\n if self.last_sensation == TERMINAL_STATE:\n self.start_episode()", "def run_burn_in(self, n_burn: int) -> None:\n for n in range(n_burn):\n self.perform_step()\n return", "def run_amount_of_ticks(self, amount):\n\t\tfor i in range(amount):\n\t\t\tself.run_tick()", "def run_test(_freq, cmd):\n for count in range(_freq):\n os.system(cmd.replace(\"result\", \"result\" + str(count + 1)))", "def run_simulation(self, num_games=10):\n for _ in range(num_games):\n self.result.append(self.single_game())", "def runner_scenario_x_times(repetitions, scenario_names, feature_files, out):\n if scenario_names is not None:\n to_test = scenario_names\n elif feature_files is not None:\n to_test = feature_files\n else:\n to_test = \"testsuite\"\n msg = (\"\\nRunning \" + str(repetitions) + \" times test(s):\\n \" \n + str(to_test) + \"\\n\")\n print(msg)\n if out:\n out_name = os.path.splitext(out)[0]\n ext = os.path.splitext(out)[1]\n for i in range(repetitions):\n print(\"Iteration number: \" + str(i+1))\n if out:\n out = out_name + \"-\" + str(i) + ext\n p = Process(target=worker_scenario, \n args=(scenario_names, feature_files, out))\n p.start()\n p.join()", "def run():\n step = 0\n while traci.simulation.getMinExpectedNumber() > 0:\n traci.simulationStep()\n step+=1\n traci.close()\n sys.stdout.flush()", "def lab_run_small(character_id, time_step):\n pass", "def run_multiple_test_cycles(self):\n # Perform as many cycles as required\n while self.args.repetitions >= 0:\n self.run_one_test_cycle()\n self.args.repetitions -= 1", "def benchmark(self):\n logger.info(self.benchmark.__doc__)\n return self.run(self.benchmark_profile())", "def run_benchmarks(urls, urlIndices, trial_number):\n path.append(os.path.join(CHROMIUM_SRC, 'tools/perf/'))\n benchmark_path = os.path.join(CHROMIUM_SRC, 'tools/perf/run_benchmark')\n output_path = 'temp'\n trial_key = 'trial{0}'.format(trial_number)\n\n cmd = ('sudo ' + benchmark_path + ' --profiler=trace telemetryBenchmarks.url{0}')\n for i in urlIndices:\n try:\n out, err, returncode = get_benchmark_result(cmd.format(i))\n timeout = False\n print 'successfully ran benchmark for url' + str(i)\n except TimeoutError:\n # Benchmark failed\n print 'Benchmark Timeout!'\n out = ''\n returncode = 1\n timeout = True\n\n failed = ['FAILED']\n if returncode != 0 or any(x in out for x in failed) or timeout:\n # If a benchmark fails, remove its corresponding wpr file, and act\n # as if it didn't exist\n # Remove from data/wpr_source\n print 'Benchmark {0} failed'.format(i)\n print 'return code is ' + str(returncode)\n print 'Out:'\n print out\n print 'Err:'\n print err\n urlName = 'url{0}_page_set_000.wpr'.format(i)\n urlpcName = 'url{0}_pc_page_set_000.wpr'.format(i)\n urlFilePath = os.path.join('data/wpr_source',urlName)\n urlpcFilePath = os.path.join('data/wpr_source',urlpcName)\n urlCmd = 'rm -f {0}'.format(urlFilePath)\n urlpcCmd = 'rm -f {0}'.format(urlpcFilePath)\n print 'Removing: {0}, {1}'.format(urlFilePath, urlpcFilePath)\n commands = [\n 'rm -f {0}'.format(urlFilePath),\n 'rm -f {0}'.format(urlpcFilePath)\n ]\n for cmdss in commands:\n p = Popen(cmdss, shell=True)\n p.wait()\n # Skip the rest of this url\n print \"Moving on!\"\n continue\n\n # Parse data\n tmp_path = 'temp/tmp_benchmark_result_json'\n with open(tmp_path, 'rb') as f:\n tmp_json = json.load(f)\n benchmark_results = tmp_json['values']\n commands = [\n 'rm -f 
~/page_load_time/telemetry/temp/tmp_benchmark_result_json',\n ]\n for cmds in commands:\n p = Popen(cmds, shell=True)\n p.wait()\n\n output = {urls[i]: {'cold_times': {trial_key: benchmark_results}}}\n output_file = os.path.join(output_path, urlsafe_b64encode(urls[i]))\n output_file += '.' + str(trial_number)\n try:\n with open(output_file, 'w') as f:\n json.dump(output, f)\n except IOError:\n raise IOError('Unable to write to {0}'.format(output_file))\n\n\n ############### Now run for Perfect Cache file ################\n\n try:\n out, err, returncode = \\\n get_benchmark_result(cmd.format(str(i) + '_pc'))\n timeout = False\n print 'successfully ran benchmark for url' + str(i) + '_pc'\n except TimeoutError:\n # Benchmark failed\n print 'Benchmark Timeout!'\n out = ''\n returncode = 1\n timeout = True\n\n failed = ['FAILED']\n if returncode != 0 or any(x in out for x in failed) or timeout:\n # If a benchmark fails, remove its corresponding wpr file, and act\n # as if it didn't exist\n # Remove from data/wpr_source\n\n print 'Benchmark {0}_pc failed'.format(i)\n print 'Out:'\n print out\n print 'Err:'\n print err\n urlName = 'url{0}_page_set_000.wpr'.format(i)\n urlpcName = 'url{0}_pc_page_set_000.wpr'.format(i)\n urlFilePath = os.path.join('data/wpr_source',urlName)\n urlpcFilePath = os.path.join('data/wpr_source',urlpcName)\n urlCmd = 'rm -f {0}'.format(urlFilePath)\n urlpcCmd = 'rm -f {0}'.format(urlpcFilePath)\n print 'Removing: {0}, {1}'.format(urlFilePath, urlpcFilePath)\n commands = [\n 'rm -f {0}'.format(urlFilePath),\n 'rm -f {0}'.format(urlpcFilePath)\n ]\n for cmdss in commands:\n p = Popen(cmdss, shell=True)\n p.wait()\n # Skip the rest of this url\n print \"Moving on!\"\n continue\n\n # Parse data\n tmp_path = 'temp/tmp_benchmark_result_json'\n with open(tmp_path, 'rb') as f:\n tmp_json = json.load(f)\n benchmark_results = tmp_json['values']\n\n commands = [\n 'rm -f ~/page_load_time/telemetry/temp/tmp_benchmark_result_json',\n ]\n for cmds in commands:\n p = Popen(cmds, shell=True)\n p.wait()\n\n output = {urls[i]: {'cold_times': {trial_key: benchmark_results}}}\n output_file = os.path.join(output_path, urlsafe_b64encode(urls[i]))\n output_file += '.' 
+ str(trial_number) + '.pc'\n try:\n with open(output_file, 'w') as f:\n json.dump(output, f)\n except IOError:\n raise IOError('Unable to write to {0}'.format(output_file))", "def runTimingTests(c, startNx, endNx, stepNx, displayResults = False):\n timesArray = []\n nxs = np.empty(shape=[0])\n iteration = 0\n\n for currNx in range(startNx, endNx, stepNx):\n nx = currNx\n nt = nx\n nxs = np.append(nxs, nx)\n _, timesSmooth, _, _ = main(nx, nt, c, displayResults = False)\n timesArray = np.append(timesArray, timesSmooth)\n iteration = iteration+1\n \n timesArray = timesArray.reshape(iteration, len(timesSmooth)) \n timesArray = np.matrix.transpose(timesArray)\n logNxs = np.log10(nxs)\n logTimes = np.log10(timesArray)\n methods = [\"FTBS\", \"CTCS\", \"CNCS\", \"LaxWendroff\"]\n if(display):\n for i in range (0, 4):\n plt.plot(logNxs, logTimes[i], label=methods[i])\n coeff = np.polyfit(logNxs,logTimes[i],1)\n print(\"Estimated order of magnitude time vs nx \"\\\n +methods[i]+\": \"+str(coeff[0]))\n plt.title(\"Log-log plot time of execution in s vs nx\\nc=\"+str(c))\n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n plt.show()", "def run(self):\n self.speed_test.start()", "def steps(self, step_count):\n self.dir.value(0 if step_count > 0 else 1)\n for i in range(abs(step_count)):\n self.stp.value(1)\n sleep_us(self.step_time)\n self.stp.value(0)\n sleep_us(self.step_time)\n self.current_position += step_count", "def main(benchmark, size=None, backend=None, repetitions=None, burnin=1, device=\"cpu\"):\n try:\n bm_module, bm_identifier = get_benchmark_module(benchmark)\n except ImportError as e:\n click.echo(f\"Error while loading benchmark {benchmark}: {e!s}\", err=True)\n raise click.Abort()\n\n available_backends = set(bm_module.__implementations__)\n\n if len(backend) == 0:\n backend = available_backends.copy()\n else:\n backend = set(backend)\n\n unsupported_backends = [b for b in backend if b not in available_backends]\n\n for b in unsupported_backends:\n click.echo(\n f'Backend \"{b}\" is not supported by chosen benchmark (skipping)', err=True\n )\n backend.remove(b)\n\n for b in backend.copy():\n try:\n with setup_functions[b](device=device) as bmod:\n click.echo(f\"Using {b} version {bmod.__version__}\")\n except BackendNotSupported as e:\n click.echo(\n f'Setup for backend \"{b}\" failed (skipping), reason: {e!s}', err=True\n )\n backend.remove(b)\n\n try:\n check_backend_conflicts(backend, device)\n except BackendConflict as exc:\n click.echo(f\"Backend conflict: {exc!s}\", err=True)\n raise click.Abort()\n\n runs = sorted(itertools.product(backend, size))\n\n if len(runs) == 0:\n click.echo(\"Nothing to do\")\n return\n\n timings = {run: [] for run in runs}\n\n if repetitions is None:\n click.echo(\"Estimating repetitions...\")\n repetitions = {}\n\n for b, s in runs:\n # use end-to-end runtime for repetition estimation\n def run_func():\n run = bm_module.get_callable(b, s, device=device)\n with setup_functions[b](device=device):\n run()\n\n repetitions[(b, s)] = estimate_repetitions(run_func)\n else:\n repetitions = {(b, s): repetitions for b, s in runs}\n\n all_runs = list(\n itertools.chain.from_iterable(\n [run] * (repetitions[run] + burnin) for run in runs\n )\n )\n random.shuffle(all_runs)\n\n results = {}\n checked = {r: False for r in runs}\n\n pbar = click.progressbar(\n label=f\"Running {len(all_runs)} benchmarks...\", length=len(runs)\n )\n\n try:\n with pbar:\n for (b, size) in all_runs:\n with setup_functions[b](device=device):\n run = 
bm_module.get_callable(b, size, device=device)\n with Timer() as t:\n res = run()\n\n # YOWO (you only warn once)\n if not checked[(b, size)]:\n if size in results:\n is_consistent = check_consistency(\n results[size], convert_to_numpy(res, b, device)\n )\n if not is_consistent:\n click.echo(\n f\"\\nWarning: inconsistent results for size {size}\",\n err=True,\n )\n else:\n results[size] = convert_to_numpy(res, b, device)\n checked[(b, size)] = True\n\n timings[(b, size)].append(t.elapsed)\n pbar.update(1.0 / (repetitions[(b, size)] + burnin))\n\n # push pbar to 100%\n pbar.update(1.0)\n\n for run in runs:\n assert len(timings[run]) == repetitions[run] + burnin\n\n finally:\n stats = compute_statistics(timings)\n click.echo(format_output(stats, bm_identifier, device=device))", "def run(self):\n last = self.system.last_timestep\n start = last.timestep + 1 if last else 0\n del last\n end = self.system.cg_steps\n \n logging.info(\"running timesteps {} to {}\".format(start, end))\n \n for _ in range(start, end):\n self.system.begin_timestep()\n self.atomistic_step()\n self.cg_step()\n self.system.end_timestep()\n \n logging.info(\"completed all {} timesteps\".format(end-start))", "def test_after_jam_step_two(self):\n for test_suite_class in self.jam_step_2_test_suite_list:\n test_suite = test_suite_class(self)\n results = test_suite.run()\n self.test_results += results", "def main():\n parser = optparse.OptionParser()\n parser.add_option('--debug', action='store_true', default=False,\n help='run in debug mode')\n parser.add_option('-i', '--iteration', type=int, default=DEFAULT_ITERATION,\n metavar='NUM',\n help='set the number of iterations for each test (defualt:%d)' % \\\n DEFAULT_ITERATION)\n parser.add_option('-f', '--fstypes', default='ext2,ext3,ext4,btrfs,xfs',\n type='string', metavar='TYPES', help='set the file systems to test')\n parser.add_option('-n', '--num', default=10000, type=int, metavar='NUM',\n help='set the number of file created')\n parser.add_option('-N', '--numa', action='store_true', default=False,\n help='run NUMA test')\n parser.add_option('-S', '--scalability', action='store_true', default=False,\n help='run scalability test')\n global options\n options, args = parser.parse_args()\n\n benchutils.check_root_or_die()\n suffix = ''\n if options.numa:\n suffix = 'numa'\n else:\n suffix = 'scale'\n output_dir = benchutils.get_output_directory(suffix=suffix, timestamp=True)\n fstypes = options.fstypes.split(',')\n for fs in fstypes:\n if options.numa:\n run_tests(output_dir, fs)\n elif options.scalability:\n run_scalability_tests(output_dir, fs)", "def run_scrapping():\n date = datetime.now().strftime(\"%Y-%m-%d\")\n size = 100\n r = list(range(size))\n random.shuffle(r)\n for i in r:\n scrap_page(url_page.format(i), date)\n print(str(i) + \" / \" + str(size))", "def run_step(self, milliseconds):\n stopDistance = self.params['safeDistance']\n\n timeStep = timedelta(milliseconds=milliseconds)\n newTime = self.time + timeStep # Time after step is performed.\n\n for light in self._lights:\n if newTime > light.getNextSwitchTime():\n light.switch(newTime)\n\n toRemove = [ ]\n for car in self._cars:\n if car.state != Car.DELETED:\n car.prepareMove(timeStep)\n else:\n toRemove.append(car)\n\n for car in toRemove: self._cars.remove(car)\n for car in self._cars: car.finishMove()\n\n # Generate new car.\n # It is always added to the queue and if there is enough place then\n # it will be instantly added to the road.\n carsToAdd, newLastCarTime = self.howManyCarsToAdd(newTime)\n 
self.addCars(carsToAdd)\n self._lastCarGenerationTime = newLastCarTime\n\n self.addCarsFromQueueToRoad()\n\n # Update time.\n self.time = newTime", "def steps_to_run(current_step, steps_per_epoch, steps_per_loop):\n if steps_per_loop <= 0:\n raise ValueError('steps_per_loop should be positive integer.')\n if steps_per_loop == 1:\n return steps_per_loop\n remainder_in_epoch = current_step % steps_per_epoch\n if remainder_in_epoch != 0:\n return min(steps_per_epoch - remainder_in_epoch, steps_per_loop)\n else:\n return steps_per_loop", "def single_run(steps_number):\n values = list()\n numerator = 0\n for i in trange(1, steps_number):\n\n numerator += generate_episode()\n\n values.append(numerator / i)\n\n return np.array(values)", "def test(self, n_test_runs: int = 10) -> None:\n steps: np.ndarray = np.zeros(n_test_runs)\n rewards: np.ndarray = np.zeros(n_test_runs)\n for t in range(n_test_runs):\n steps[t], rewards[t] = self.step(collect=False)\n\n self.get_logger().warn('---------- TEST RUN RESULTS ----------')\n self.get_logger().warn(f'Average: {steps.mean()}')\n self.get_logger().warn(f'STD: {steps.std()}')\n self.get_logger().warn(f'Median: {np.median(steps)}')\n self.get_logger().warn(f'Average Reward: {rewards.mean()}')", "def WarpStep(iters=5):\n MSG(\"WarpStep\")\n for j in range(iters):\n warp.step()\n return", "def execute_and_get_timesteps(self, num_timesteps, max_timesteps_per_episode=0, deterministic=False):\n pass", "def run(self, n=1, speed=1.0, rnd=0, filename=None, start_frame=0, verbose=True, crop=None):\n if verbose and filename:\n print 'rendering %s frames as %s ... %s' % (n, (filename % start_frame), (filename % (start_frame + n - 1)))\n for k in xrange(n):\n self.z += rnd * rand(*self.z.shape)\n self.step(speed=speed)\n if filename:\n out = self.rgb_image()\n if crop:\n out = out[crop[0]:crop[1],crop[2]:crop[3],...]\n imsave(filename % (k + start_frame), out)\n if verbose:\n print n - k,\n sys.stdout.flush()", "def run(self, n_sweeps, therm_factor=0.1, sweep_factor=1, n_flips=None):\n if self.samples_file:\n f = open(self.samples_file, 'w')\n\n if n_flips:\n n_flips = self.hamiltonian.min_flips()\n if n_flips != 1 and n_flips != 2:\n raise ValueError('Invalid number of spin flips')\n if not (0 <= therm_factor <= 1):\n raise ValueError('The thermalization factor should be a real '\n 'number between 0 and 1')\n if n_sweeps < 50:\n raise ValueError('Too few steps in MC. 
Please use at least 50')\n\n print('Starting MC Sampling')\n print('Will perform {} steps'.format(n_sweeps))\n\n self.nqs.init_lookup_tables(self.current_state)\n self.reset_sampler_values()\n\n if therm_factor != 0:\n print('Starting Thermalization')\n\n n_moves = int(therm_factor * n_sweeps) * \\\n int(sweep_factor * self.n_visible)\n for _ in range(n_moves):\n self.move(n_flips)\n\n print('Completed Thermalization')\n\n self.reset_sampler_values()\n\n print('Starting Monte Carlo Sampling')\n\n for i in range(int(n_sweeps)):\n for _ in range(int(sweep_factor * self.n_visible)):\n self.move(n_flips)\n self.current_Hloc = self.local_energy()\n self.state_history.append(np.array(self.current_state))\n self.local_energies.append(self.current_Hloc)\n if self.samples_file:\n self.write_current_state(f)\n\n print('Completed Monte Carlo Sampling')\n\n if self.samples_file:\n f.close()\n\n return self.estimate_wf_energy()", "def execute(self):\n print_verbose_messages = (self.verbose\n and self.device.communicator.rank == 0)\n\n # Ensure that all ops are attached (needed for is_tuning_complete).\n self.run(0)\n\n if print_verbose_messages:\n print(f'Running {type(self).__name__} benchmark')\n\n if print_verbose_messages:\n print(f'.. warming up for {self.warmup_steps} steps')\n self.run(self.warmup_steps)\n\n if (isinstance(self.device, hoomd.device.GPU)\n and hasattr(self.sim.operations, 'is_tuning_complete')):\n while not self.sim.operations.is_tuning_complete:\n if print_verbose_messages:\n print('.. autotuning GPU kernel parameters for '\n f'{self.warmup_steps} steps')\n self.run(self.warmup_steps)\n\n if print_verbose_messages:\n print(f'.. running for {self.benchmark_steps} steps '\n f'{self.repeat} time(s)')\n\n # benchmark\n performance = []\n\n if isinstance(self.device, hoomd.device.GPU):\n with self.device.enable_profiling():\n for i in range(self.repeat):\n self.run(self.benchmark_steps)\n performance.append(self.get_performance())\n if print_verbose_messages:\n print(f'.. {performance[-1]} {self.units}')\n else:\n for i in range(self.repeat):\n self.run(self.benchmark_steps)\n performance.append(self.get_performance())\n if print_verbose_messages:\n print(f'.. 
{performance[-1]} {self.units}')\n\n return performance", "def test_benchmark(self):\n\n proc = subprocess.Popen([\n sys.executable,\n benchmark.__file__,\n self.live_server_ws_url,\n ])\n for _ in range(0, 90, 5):\n time.sleep(5)\n if proc.returncode:\n break\n else:\n proc.terminate()\n proc.wait()\n assert proc.returncode == 0", "def run_tests(self):\n with self.report.timer.record(\"run\"):\n self.result.report.extend(self._run_tests())", "def RunSuite(config, files, extra_flags, errors):\n global ERRORS, CONCURRENCY\n Banner('running %d tests' % (len(files)))\n pool = multiprocessing.Pool(processes=CONCURRENCY)\n # create a list of run arguments to map over\n argslist = [(num, len(files), config, test, extra_flags)\n for num, test in enumerate(files)]\n # let the process pool handle the test assignments, order doesn't matter\n pool.map(RunTest, argslist)\n while not ERRORS.empty():\n phase, test = ERRORS.get()\n errors[phase].append(test)", "def time(n):\n steps = 3 + math.ceil(n/5.0)*2\n return steps", "def step(self, n, dlist):\n pass", "def step(self, steps):\n self._simulate(endStep=self.currentStep+steps)", "def set_number_of_time_steps(self, number_of_time_steps):\n self.number_of_time_steps = number_of_time_steps", "def warmup_step(ckpt_step: int) -> float:\n return ckpt_step * 10", "def _step(self):\n title()\n self.runCount = 1\n self.experiment.pause = False\n self._runExperiment()\n self.pause = True", "def run(address, nsteps, speed1=6, speed2=6):\n m1i = 0\n m2i = 0\n for i in range(nsteps):\n m1i = _speed_action(i, speed1, m1i)\n m2i = _speed_action(i, speed2, m2i)\n \n byte_to_send = MOTORS[m1i][m2i] \n try:\n i2c.write(address, byte_to_send)\n except:\n display.show(Image.SURPRISED)\n sleep(1000)\n break\n sleep(5)", "def test_multiple_games(self, iteration=10):\n # TODO: multithread?\n for i in range(iteration):\n self.test_one_game()", "def verilog_thread(name, step):\n run_command([\"./run_cadence.sh\", name, str(step), pdn, supply])", "def runSimulation(numSteps):\r\n\r\n # TO DO\r\n #pass\r\n rabbits = []\r\n foxes = []\r\n for i in range(numSteps):\r\n rabbitGrowth()\r\n foxGrowth()\r\n rabbits.append(CURRENTRABBITPOP)\r\n foxes.append(CURRENTFOXPOP)\r\n return rabbits, foxes", "def runSimulation(numSteps):\r\n\r\n # TO DO\r\n #pass\r\n rabbits = []\r\n foxes = []\r\n for i in range(numSteps):\r\n rabbitGrowth()\r\n foxGrowth()\r\n rabbits.append(CURRENTRABBITPOP)\r\n foxes.append(CURRENTFOXPOP)\r\n return rabbits, foxes", "def process(self, args):\n for benchmark_file in args.benchmark_files:\n self.process_individual_file(benchmark_file)\n self.total_files += 1", "def Run(benchmark_spec):\n _UpdateBenchmarkSpecWithFlags(benchmark_spec)\n vm = benchmark_spec.vms[0]\n\n if benchmark_spec.tpus:\n mnist_benchmark_script = 'mnist_tpu.py'\n mnist_benchmark_cmd = ('cd tpu/models && '\n 'export PYTHONPATH=$(pwd) && '\n 'cd official/mnist && '\n 'python {script} '\n '--data_dir={data_dir} '\n '--iterations={iterations} '\n '--model_dir={model_dir} '\n '--batch_size={batch_size}'.format(\n script=mnist_benchmark_script,\n data_dir=benchmark_spec.data_dir,\n iterations=benchmark_spec.iterations,\n model_dir=benchmark_spec.model_dir,\n batch_size=benchmark_spec.batch_size))\n else:\n mnist_benchmark_script = 'mnist.py'\n mnist_benchmark_cmd = ('cd models && '\n 'export PYTHONPATH=$(pwd) && '\n 'cd official/mnist && '\n 'python {script} '\n '--data_dir={data_dir} '\n '--model_dir={model_dir} '\n '--batch_size={batch_size} '.format(\n script=mnist_benchmark_script,\n 
data_dir=benchmark_spec.data_dir,\n model_dir=benchmark_spec.model_dir,\n batch_size=benchmark_spec.batch_size))\n\n if nvidia_driver.CheckNvidiaGpuExists(vm):\n mnist_benchmark_cmd = '{env} {cmd}'.format(\n env=tensorflow.GetEnvironmentVars(vm), cmd=mnist_benchmark_cmd)\n samples = []\n metadata = CreateMetadataDict(benchmark_spec)\n\n if benchmark_spec.train_steps > 0:\n if benchmark_spec.tpus:\n tpu = benchmark_spec.tpu_groups['train'].GetName()\n num_shards = '--num_shards={}'.format(\n benchmark_spec.tpu_groups['train'].GetNumShards())\n else:\n tpu = num_shards = ''\n\n if benchmark_spec.tpus:\n mnist_benchmark_train_cmd = (\n '{cmd} --tpu={tpu} --use_tpu={use_tpu} --train_steps={train_steps} '\n '{num_shards} --noenable_predict'.format(\n cmd=mnist_benchmark_cmd,\n tpu=tpu,\n use_tpu=bool(benchmark_spec.tpus),\n train_steps=benchmark_spec.train_steps,\n num_shards=num_shards))\n else:\n mnist_benchmark_train_cmd = (\n '{cmd} --train_epochs={train_epochs} '.format(\n cmd=mnist_benchmark_cmd,\n train_epochs=benchmark_spec.train_epochs))\n\n start = time.time()\n stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_train_cmd)\n elapsed_seconds = (time.time() - start)\n samples.extend(MakeSamplesFromTrainOutput(\n metadata, stdout + stderr, elapsed_seconds, benchmark_spec.train_steps))\n\n if benchmark_spec.eval_steps > 0:\n if benchmark_spec.tpus:\n mnist_benchmark_eval_cmd = (\n '{cmd} --tpu={tpu} --use_tpu={use_tpu} --eval_steps={eval_steps}'\n .format(\n cmd=mnist_benchmark_cmd,\n use_tpu=bool(benchmark_spec.tpus),\n tpu=benchmark_spec.tpu_groups['eval'].GetName(),\n eval_steps=benchmark_spec.eval_steps))\n else:\n mnist_benchmark_eval_cmd = ('{cmd} --eval_steps={eval_steps}'.format(\n cmd=mnist_benchmark_cmd, eval_steps=benchmark_spec.eval_steps))\n\n stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_eval_cmd)\n samples.extend(MakeSamplesFromEvalOutput(metadata, stdout + stderr,\n elapsed_seconds))\n return samples", "def threadsInBatches_run(l_threadAnalysis):\n index = 1\n if self.numThreads > total:\n self.numThreads = total\n threadFullLoops = int(total / self.numThreads)\n threadRem = total % self.numThreads\n alreadyRunCount = thread_batch(\n l_threadAnalysis,\n threadFullLoops,\n self.numThreads,\n 0)\n nextRunCount = thread_batch(\n l_threadAnalysis,\n 1,\n threadRem,\n alreadyRunCount)", "def train(self, steps):\r\n for e in range(steps):\r\n # do something...\r\n pass\r\n return self.get_value_function()", "def run_benchmark(curl, benchmark, test_config = TestConfig()):\n\n warmup_runs = benchmark.warmup_runs\n benchmark_runs = benchmark.benchmark_runs\n message = '' #Message is name of benchmark... 
print it?\n\n if (warmup_runs <= 0):\n raise Exception(\"Invalid number of warmup runs, must be > 0 :\" + warmup_runs)\n if (benchmark_runs <= 0):\n raise Exception(\"Invalid number of benchmark runs, must be > 0 :\" + benchmark_runs)\n\n #Initialize variables to store output\n output = BenchmarkResult()\n output.name = benchmark.name\n output.group = benchmark.group\n metricnames = list(benchmark.metrics)\n metricvalues = [METRICS[name] for name in metricnames] # Metric variable for curl, to avoid hash lookup for every metric name\n results = [list() for x in xrange(0, len(metricnames))] # Initialize arrays to store results for each metric\n\n curl.setopt(pycurl.WRITEFUNCTION, lambda x: None) #Do not store actual response body at all.\n\n #Benchmark warm-up to allow for caching, JIT compiling, on client\n logging.info('Warmup: ' + message + ' started')\n for x in xrange(0, warmup_runs):\n if benchmark.method == u'POST' or benchmark.method == u'PUT':\n curl.setopt(curl.READFUNCTION, StringIO.StringIO(benchmark.body).read)\n curl.perform()\n logging.info('Warmup: ' + message + ' finished')\n\n logging.info('Benchmark: ' + message + ' starting')\n\n for x in xrange(0, benchmark_runs): # Run the actual benchmarks\n if benchmark.method == u'POST' or benchmark.method == u'PUT':\n curl.setopt(curl.READFUNCTION, StringIO.StringIO(benchmark.body).read)\n\n try: # Run the curl call, if it errors, then add to failure counts for benchmark\n curl.perform()\n except Exception:\n output.failures = output.failures + 1\n continue # Skip metrics collection\n\n # Get all metrics values for this run, and store to metric lists\n for i in xrange(0, len(metricnames)):\n results[i].append( curl.getinfo(metricvalues[i]) )\n\n logging.info('Benchmark: ' + message + ' ending')\n\n temp_results = dict()\n for i in xrange(0, len(metricnames)):\n temp_results[metricnames[i]] = results[i]\n output.results = temp_results\n\n curl.close()\n return analyze_benchmark_results(output, benchmark)", "def run_stage_loop(cls, _opts, tests_results, put_next_stage):\n for _, result in tests_results:\n put_next_stage(result)", "def _run_benchmark(self, params):\n logging.info('Running benchmark [%s]', self._get_name())\n params = benchmark_cnn.setup(params)\n bench = benchmark_cnn.BenchmarkCNN(params)\n bench.print_info()\n stats = bench.run()\n extras = {}\n extras['examples_per_sec'] = stats.get('images_per_sec')\n if 'last_average_loss' in stats:\n extras['last_average_loss'] = stats['last_average_loss']\n if 'top_1_accuracy' in stats:\n extras['top_1_accuracy'] = stats['top_1_accuracy']\n if 'top_5_accuracy' in stats:\n extras['top_5_accuracy'] = stats['top_5_accuracy']\n self.report_benchmark(\n iters=stats.get('num_steps'),\n wall_time=stats.get('average_wall_time'),\n extras=extras)", "def batch_steps(num_examples, batch_size):\n steps = num_examples // batch_size\n if num_examples % batch_size > 0:\n steps += 1\n return steps", "def run_all_iterations(self):\n self.start_time = time.time()\n for _ in xrange(self.iterations):\n self.run_iteration()\n self.elapsed_time = time.time() - self.start_time\n\n self.print_statistics()", "def run(self, repetitions, **kwargs):\n\t\tself.sampler.sample(repetitions, **kwargs)", "def test_number_of_steps(self):\n class Mock(object):\n def __init__(self):\n self.count = 0\n\n def evolve(self, t, dt):\n self.count += 1\n\n G = Mock()\n sim = simulation.Simulation(G, dt=0.1)\n\n sim.run(100.0)\n self.assertEqual(G.count, 1000)\n\n G = Mock()\n sim = simulation.Simulation(G, dt=0.2)\n 
sim.run(100.2)\n self.assertEqual(G.count, 501)", "def run_benchmark():\n import argparse\n parser = argparse.ArgumentParser(description='Benchmark alchemically modified system against unmodified system.')\n parser.add_argument('--platform', dest='platform_name', action='store', default=None, help='platform name to benchmark (default: None)')\n options = parser.parse_args()\n\n from sams.tests import testsystems\n for testsystem_name in ['AblImatinibExplicitAlchemical']:\n cls = getattr(testsystems, testsystem_name)\n testsystem = cls()\n factory_args = { 'ligand_atoms' : testsystem.alchemical_atoms, 'receptor_atoms' : range(0,4266) }\n benchmark(testsystem.system, testsystem.positions, platform_name=options.platform_name, nsteps=5000, timestep=1.0*unit.femtoseconds, factory_args=factory_args)", "def run_benchmark(self, test_config, instance, copy=0):\n # Timestamp and other values added for reporting\n result_dir = self.results_directory(test_config)\n test_config['timestamp'] = int(time.time())\n test_config['workspace'] = self.workspace\n cmd = self._cmd_builder(test_config)\n test_config['cmd'] = cmd\n total_batches = test_config['total_batches']\n\n test_home = os.path.join(self.bench_home, test_config['cmd_path'])\n\n # Write config to results folder\n config_file_out = os.path.join(result_dir, 'config.yaml')\n config_out = open(config_file_out, 'w')\n config_out.write(yaml.dump(test_config))\n config_out.close()\n\n # TODO(tobyboyd@): No longer distributed remove threads.\n worker_threads = []\n i = 0\n cmd = 'cd {}; {}'.format(test_home, cmd)\n print('[{}] worker | Run benchmark({}):{}'.format(\n copy, test_config['test_id'], cmd))\n stdout_file = os.path.join(result_dir, 'worker_%d_stdout.log' % i)\n stderr_file = os.path.join(result_dir, 'worker_%d_stderr.log' % i)\n t = instance.ExecuteCommandInThread(\n cmd, stdout_file, stderr_file, print_error=True)\n worker_threads.append(t)\n\n # Wait for log file to appear\n wait_time = 0\n while t.is_alive() and not os.path.isfile(stdout_file):\n print('Waiting for log file. Waited for {} seconds.'.format(wait_time))\n time.sleep(2)\n wait_time += 2\n\n # TODO(tobyboyd@) fix fragile check for batch to stop on.\n # Example: Epoch: [0][130/40037] Time 0.397\n batch_killer = '{}/'.format(total_batches)\n while t.is_alive():\n with open(stdout_file, 'r') as log:\n for line in log:\n if batch_killer in line:\n print('{} batches complete. 
Kill Thread.'.format(batch_killer))\n instance.kill_processes()\n break\n time.sleep(5)\n\n for t in worker_threads:\n t.join()\n\n return result_dir", "def test_numbers(number):\n print(\"\\nRunning test_numbers with {}\".format(number))", "def run_benchmark(env: Env, in_file):\n\n print('Running benchmarks in', in_file.name)\n # Run file_path through mlir_to_bef and bef_executor and extract the\n # benchmark result.\n return env.run_mlir(in_file.read())", "def test_worker_steps(self):\n target_thread_count = 3\n\n class StepCounter(object):\n \"\"\"\n Count the number of times a step is taken in the worker.\n \"\"\"\n def __init__(self):\n self.a_steps = 0\n self.b_steps = 0\n self.c_steps = 0\n\n def __call__(self, index, thread_count):\n self.a_steps += 1\n yield\n self.b_steps += 1\n yield\n self.c_steps += 1\n\n step_counter = StepCounter()\n\n rusher = Rusher(step_counter, target_thread_count)\n rusher.rush()\n self.assertEqual(step_counter.a_steps, target_thread_count)\n self.assertEqual(step_counter.b_steps, target_thread_count)\n self.assertEqual(step_counter.c_steps, 0)", "def train(self, num_episodes, max_episode_steps=100, save_freq=100, render=False):\n while self.episodes_done < num_episodes:\n self.trainOneEpisode(num_episodes, max_episode_steps, save_freq, render)\n self.saveCheckpoint()", "def sciml_bench_run(smlb_in: RuntimeIn, smlb_out: RuntimeOut):\n # activate monitor\n # Note: To use smlb_out, you must activate it, passing the rank\n # information initialized by your distributed learning environment;\n # for a non-distributed benchmark, simply pass rank=0, local_rank=0\n # and activate_log_on_host(_device)=False; here we use True for\n # demonstration -- the log on host0 and device0 will be the same as\n # that on console except for some small differences in time\n # measurements.\n smlb_out.activate(rank=0, local_rank=0, activate_log_on_host=True,\n activate_log_on_device=True, console_on_screen=True)\n\n # log top level process\n # Note: Calling begin(), ended() and message() on smlb_out.log means\n # calling these functions on console, host and device; nothing\n # happens when calling these functions on an unactivated logger.\n log = smlb_out.log\n log.begin('Running benchmark MNIST_tf_keras')\n\n # parse input arguments (only batch_size and epochs)\n # Note: Use try_get() to get a benchmark-specific argument safely from\n # smlb_in.bench_args (passed by users via -b).\n with log.subproc('Parsing input arguments'):\n # hyperparameters\n batch_size = smlb_in.bench_args.try_get('batch_size', default=64)\n epochs = smlb_in.bench_args.try_get('epochs', default=2)\n log.message(f'batch_size = {batch_size}')\n log.message(f'epochs = {epochs}')\n\n # create datasets\n with log.subproc('Creating datasets'):\n dataset_dir = smlb_in.dataset_dir\n train_set = create_dataset_mnist(dataset_dir / 'train.hdf5', batch_size)\n test_set = create_dataset_mnist(dataset_dir / 'test.hdf5', batch_size)\n log.message(f'Dataset directory: {dataset_dir}')\n\n # create model\n with log.subproc('Creating CNN model'):\n model = create_model_mnist()\n\n # train model\n log.begin('Training CNN model')\n # fit()\n with log.subproc('Running model.fit()'):\n # stamp model.fit in system monitor\n # Note: smlb_out.system will monitor system usage regularly; use\n # smlb_out.system.stamp_event() to stamp an event in the report\n smlb_out.system.stamp_event('model.fit')\n history = model.fit(train_set, epochs=epochs, batch_size=batch_size,\n validation_data=test_set, verbose=0,\n 
callbacks=[LogEpochCallback(smlb_out)])\n # save model\n with log.subproc('Saving model weights'):\n weights_file = smlb_in.output_dir / 'model_weights.h5'\n model.save(weights_file)\n log.message(f'Saved to: {weights_file}')\n # save history\n with log.subproc('Saving training history'):\n history_file = smlb_in.output_dir / 'training_history.yml'\n with open(history_file, 'w') as handle:\n yaml.dump(history.history, handle)\n log.message(f'Saved to: {history_file}')\n log.ended('Training CNN model')\n\n # predict\n with log.subproc('Making predictions on test set'):\n with h5py.File(dataset_dir / 'test.hdf5', 'r') as h5_file:\n # stamp model.predict in system monitor\n smlb_out.system.stamp_event('model.predict')\n pred = model.predict(np.expand_dims(h5_file['image'][:], -1) / 255)\n correct = np.sum(pred.argmax(axis=1) == h5_file['label'][:])\n log.message(f'{correct} correct predictions for {len(pred)} images '\n f'(accuracy: {correct / len(pred) * 100:.2f}%)')\n\n # end top level\n log.ended('Running benchmark MNIST_tf_keras')", "def run(self):\n\n while not self.__done:\n self.single_cycle()\n\n \"\"\"\n while not self.__done:\n self.step()\n self.debug()\n \"\"\"", "def run(self):\n\n if self.count < self.max_pages:\n self.engine_redis.set(self.crawl_id + \"_count\", self.count)\n self.count = self.count + self.speed\n else:\n self.engine_redis.set(self.crawl_id + \"_count\", -2)\n\n reactor.callLater(5, self.run)", "def run_trials(f, n):\n\tfor value in range(2, 3):\n\t\tprint(\"{:>3}:{:>5}\".format(value, f(n, value)))", "def execute_series(self):\n for n in xrange(self.conf[\"n_runs\"]):\n self.runs[n].execute()", "def simulate(self, num_games):\r\n # self.runs = num_games #Initializes a tracker for the number of runs\r\n for _ in range(num_games):\r\n self.results.append(self._simulate_once())\r\n return self.results", "def run(method, n):\n \n m1,m2 = generate(n)\n \n start = time.time()\n method(m1,m2)\n end = time.time()\n \n exe = end - start\n \n return exe", "def every_n_iters(self, runner: Runner, n: int):\n if runner.iter < self.start_iter:\n return True\n return (runner.iter + 1 - self.start_iter) % n == 0 if n > 0 else False", "def experiment(agent, steps, runs, initialize=None):\n result = 0\n for r in range(runs):\n result += simulate(agent, steps, initialize)\n return result / runs", "def test_time(cmd, samples=16, warmup=4):\n # do testing\n print()\n avg_time = 0\n for s in range(samples + warmup):\n # report progress\n progress = s / (samples + warmup)\n print(CSI_UP + CSI_CLEARLN + \"Testing [{}%]\".format(floor(progress * 100)))\n\n output = shell(cmd) # run command\n tables = csv_mt.read_string(output, parse_float=True) # parse its output\n time = tables[\"statistics\"][\"time_us\"][0] # get its timing data\n\n # skip a few runs to let the system \"warm up\"\n if s >= warmup:\n avg_time += time / samples # compute average execution time\n\n # log the average time for this test case\n return avg_time", "def run_tests(output_dir, fstype):\n global options\n if options.debug:\n print \"Run NUMA test\"\n for num_disks in [2]:\n for num_dirs in range(1, 5):\n postmark = PostMarkTest(output_dir, fstype, num_disks, num_dirs)\n run_one_test(postmark)", "def test_run_sim():\n rnd = rand.Arrivals(31, 40)\n sim.run_sim(2, 1, 3, 4, 24, rnd)", "def benchmark(trials:int):\n def benchmark_method(function:Callable[[int],int]) -> Callable[[int],Tuple[float,str]]:\n def time_wrapper(*args) -> Tuple[float,str]:\n \"\"\" Return the time taken to run a fibonacci method in 
microseconds \"\"\"\n t1 = time.time()\n for _ in range(trials):\n function(*args)\n return ((time.time()-t1)/trials) * 1e6, function.__name__\n return time_wrapper\n return benchmark_method", "def run_timesteps(self, nsteps=1):\n if not self.initialized:\n raise RuntimeError(\"OversetSimulation has not been initialized\")\n\n wclabels = \"Pre Conn Solve Post\".split()\n tstart = self.last_timestep + 1\n tend = self.last_timestep + 1 + nsteps\n self.printer.echo(\"Running %d timesteps starting from %d\"%(nsteps, tstart))\n for nt in range(tstart, tend):\n with self.timer(\"Pre\", incremental=True):\n for ss in self.solvers:\n ss.pre_advance_stage1()\n\n with self.timer(\"Conn\", incremental=True):\n if self._do_connectivity(nt):\n self.perform_overset_connectivity()\n\n with self.timer(\"Pre\", incremental=False):\n for ss in self.solvers:\n ss.pre_advance_stage2()\n\n with self.timer(\"Conn\"):\n self.exchange_solution()\n\n with self.timer(\"Solve\"):\n for ss in self.solvers:\n ss.advance_timestep()\n\n with self.timer(\"Post\"):\n for ss in self.solvers:\n ss.post_advance()\n\n self.comm.Barrier()\n wctime = self.timer.get_timings(wclabels)\n wctime_str = ' '.join(\"%s: %.4f\"%(k, v) for k, v in wctime.items())\n self.printer.echo(\"WCTime:\", \"%5d\"%nt, wctime_str, \"Total:\",\n \"%.4f\"%sum(wctime.values()))\n self.last_timestep = tend", "def next ( num = 1 ) :\n return run ( num )", "def _run():\n subprocess.check_call(\n [\n \"tools/bazel\",\n \"build\",\n \"-c\",\n \"opt\",\n \"test/core/memory_usage/memory_usage_test\",\n ]\n )\n ret = {}\n for name, benchmark_args in _BENCHMARKS.items():\n for scenario, extra_args in _SCENARIOS.items():\n # TODO(chenancy) Remove when minstack is implemented for channel\n if name == \"channel\" and scenario == \"minstack\":\n continue\n try:\n output = subprocess.check_output(\n [\n \"bazel-bin/test/core/memory_usage/memory_usage_test\",\n ]\n + benchmark_args\n + extra_args\n )\n except subprocess.CalledProcessError as e:\n print(\"Error running benchmark:\", e)\n continue\n for line in output.splitlines():\n for key, (pattern, conversion) in _INTERESTING.items():\n m = re.match(pattern, line)\n if m:\n ret[scenario + \": \" + key] = conversion(m.group(1))\n return ret", "def run_simulations(self,i_iteration,n_samples=None,filename=None):\n\n assert type(i_iteration) is int\n assert type(n_samples) in [type(None),int]\n assert type(filename) in [type(None),str]\n\n\n # define some convenience local variables for readability\n i = i_iteration\n if n_samples is not None:\n _n_samples = self.configuration.sampling_type[i]['n_samples']\n else:\n _n_samples = n_samples\n\n _sampling_type = self.configuration.sampling_type[i]['type']\n if filename is not None:\n _filename = self.configuration.sampling_type[i][n_samples]\n else:\n pass", "def start(self, total: int, name: str = None):\n\n # Clean the run\n self.test_run = RunElements()\n\n if name is not None:\n self.test_run.name = name\n\n self.test_run.total = total\n\n # Init the start run date\n from datetime import datetime\n self.test_run.date = datetime.now().strftime(\"%d-%m-%Y (%H:%M)\")\n\n self.__send_all()", "def run(self, r, niters=10000):\n validator.validate_type(r, rng, param_name='r')\n validator.validate_positive(niters, param_name='niters')\n for _ in xrange(niters):\n # This goes against every object-oriented bone in my body, but the interface must be satisfied\n # And actually Python won't even let me do this because I'm accessing a method in a C++ class...\n # I'd have to write 
this whole thing in Cython or change the state interface to expose all these\n # functions separately...which might actually be worth doing.\n self._latent._thisptr.get()[0].sample_aux()\n self._latent._thisptr.get()[0].sample_state()\n self._latent._thisptr.get()[0].clear_empty_states()\n self._latent._thisptr.get()[0].sample_hypers(20)\n self._latent._thisptr.get()[0].sample_pi()\n self._latent._thisptr.get()[0].sample_phi()", "def test(numTrials):\n # Your Code Here\n hits = 0.0\n for i in range(numTrials):\n result = trial()\n #print result\n hits += result\n return hits / numTrials", "def number_of_iterations(self) -> int:\n pass", "def run(self):\r\n try:\r\n self.loader.find_and_load_step_definitions()\r\n except StepLoadingError, e:\r\n print \"Error loading step definitions:\\n\", e\r\n return\r\n\r\n results = []\r\n if self.explicit_features:\r\n features_files = self.explicit_features\r\n else:\r\n features_files = self.loader.find_feature_files()\r\n if self.random:\r\n random.shuffle(features_files)\r\n\r\n if not features_files:\r\n self.output.print_no_features_found(self.loader.base_dir)\r\n return\r\n\r\n processes = Pool(processes=self.parallelization)\r\n test_results_it = processes.imap_unordered(\r\n worker_process, [(self, filename) for filename in features_files]\r\n )\r\n \r\n all_total = ParallelTotalResult()\r\n for result in test_results_it:\r\n all_total += result['total']\r\n sys.stdout.write(result['stdout'])\r\n sys.stderr.write(result['stderr'])\r\n\r\n return all_total", "def run_commands(command, number_to_run, temp_file):\n global g_max_runtime_secs\n global g_finished_this_unit_test\n\n temp_string = command.split()\n testname = temp_string[-1]\n temp_string = testname.split('/')\n\n full_command = command + ' > ' + temp_file\n g_finished_this_unit_test = False\n\n for run_index in range(0, number_to_run):\n\n if g_finished_this_unit_test:\n break\n\n child = subprocess.Popen(full_command, shell=True)\n\n while child.poll() is None:\n time.sleep(20)\n# subprocess.call(full_command, shell=True) # run the command,\n\n with open(temp_file, 'r') as thefile: # go into tempfile and grab test run info\n for each_line in thefile:\n\n temp_string = each_line.split()\n if len(temp_string) > 0:\n if temp_string[0] == 'PASS':\n test_time = temp_string[2]\n try:\n runtime = float(test_time[:-1])\n\n print(\"Unit test run time is {0}\".format(runtime))\n if runtime > g_max_runtime_secs:\n g_finished_this_unit_test = True\n\n except:\n print(\"Cannot convert run time. It is {0}\\n\".format(runtime))\n break" ]
[ "0.6977534", "0.68559444", "0.68559444", "0.6314905", "0.6159092", "0.60951483", "0.603625", "0.59564173", "0.5941269", "0.59056836", "0.59001297", "0.5890866", "0.58668673", "0.58663905", "0.58295834", "0.5819737", "0.5806568", "0.5786354", "0.5773823", "0.5768865", "0.57342255", "0.5717713", "0.5706448", "0.5675689", "0.56614476", "0.56509656", "0.5631863", "0.56305426", "0.5628702", "0.56249124", "0.5538285", "0.55346733", "0.55201626", "0.55108285", "0.54878366", "0.5481767", "0.547965", "0.5478106", "0.54758817", "0.5471804", "0.5468016", "0.54622877", "0.5444621", "0.5432485", "0.54272085", "0.5413742", "0.54030335", "0.5402772", "0.5393026", "0.5387628", "0.53781956", "0.5377465", "0.53745645", "0.53671145", "0.53490686", "0.5347224", "0.53358424", "0.5329832", "0.5314868", "0.5314868", "0.53141326", "0.53134865", "0.53117716", "0.5296811", "0.5293995", "0.5292267", "0.5290666", "0.5287545", "0.527833", "0.5275583", "0.52675724", "0.52512336", "0.5250008", "0.5249018", "0.5241011", "0.5240223", "0.5239714", "0.52258927", "0.5217146", "0.5212264", "0.5211445", "0.5210532", "0.5200782", "0.5200042", "0.51929814", "0.5183438", "0.5174735", "0.5171874", "0.51623374", "0.516147", "0.516046", "0.5159556", "0.51477283", "0.5147181", "0.51445013", "0.5142253", "0.5134693", "0.5132766", "0.51255375", "0.51217854" ]
0.64317936
3
Execute the benchmark and report the performance.
def execute(self): print_verbose_messages = (self.verbose and self.device.communicator.rank == 0) # Ensure that all ops are attached (needed for is_tuning_complete). self.run(0) if print_verbose_messages: print(f'Running {type(self).__name__} benchmark') if print_verbose_messages: print(f'.. warming up for {self.warmup_steps} steps') self.run(self.warmup_steps) if (isinstance(self.device, hoomd.device.GPU) and hasattr(self.sim.operations, 'is_tuning_complete')): while not self.sim.operations.is_tuning_complete: if print_verbose_messages: print('.. autotuning GPU kernel parameters for ' f'{self.warmup_steps} steps') self.run(self.warmup_steps) if print_verbose_messages: print(f'.. running for {self.benchmark_steps} steps ' f'{self.repeat} time(s)') # benchmark performance = [] if isinstance(self.device, hoomd.device.GPU): with self.device.enable_profiling(): for i in range(self.repeat): self.run(self.benchmark_steps) performance.append(self.get_performance()) if print_verbose_messages: print(f'.. {performance[-1]} {self.units}') else: for i in range(self.repeat): self.run(self.benchmark_steps) performance.append(self.get_performance()) if print_verbose_messages: print(f'.. {performance[-1]} {self.units}') return performance
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def benchmark(self):\n logger.info(self.benchmark.__doc__)\n return self.run(self.benchmark_profile())", "def _run_benchmark(self, params):\n logging.info('Running benchmark [%s]', self._get_name())\n params = benchmark_cnn.setup(params)\n bench = benchmark_cnn.BenchmarkCNN(params)\n bench.print_info()\n stats = bench.run()\n extras = {}\n extras['examples_per_sec'] = stats.get('images_per_sec')\n if 'last_average_loss' in stats:\n extras['last_average_loss'] = stats['last_average_loss']\n if 'top_1_accuracy' in stats:\n extras['top_1_accuracy'] = stats['top_1_accuracy']\n if 'top_5_accuracy' in stats:\n extras['top_5_accuracy'] = stats['top_5_accuracy']\n self.report_benchmark(\n iters=stats.get('num_steps'),\n wall_time=stats.get('average_wall_time'),\n extras=extras)", "def main(cls):\n parser = cls.make_argument_parser()\n args = parser.parse_args()\n args.device = make_hoomd_device(args)\n benchmark = cls(**vars(args))\n performance = benchmark.execute()\n\n if args.device.communicator.rank == 0:\n print(f'{numpy.mean(performance)}')", "def Run(benchmark_spec):\n vms = benchmark_spec.vms\n master_vm = vms[0]\n run_command = 'cd %s && %s ./%s' % (hpcg.HPCG_DIR,\n _GetEnvironmentVars(benchmark_spec),\n RUN_SCRIPT)\n output, _ = master_vm.RobustRemoteCommand(run_command)\n return _MakeSamplesFromOutput(benchmark_spec, output)", "def run_benchmark(curl, benchmark, test_config = TestConfig()):\n\n warmup_runs = benchmark.warmup_runs\n benchmark_runs = benchmark.benchmark_runs\n message = '' #Message is name of benchmark... print it?\n\n if (warmup_runs <= 0):\n raise Exception(\"Invalid number of warmup runs, must be > 0 :\" + warmup_runs)\n if (benchmark_runs <= 0):\n raise Exception(\"Invalid number of benchmark runs, must be > 0 :\" + benchmark_runs)\n\n #Initialize variables to store output\n output = BenchmarkResult()\n output.name = benchmark.name\n output.group = benchmark.group\n metricnames = list(benchmark.metrics)\n metricvalues = [METRICS[name] for name in metricnames] # Metric variable for curl, to avoid hash lookup for every metric name\n results = [list() for x in xrange(0, len(metricnames))] # Initialize arrays to store results for each metric\n\n curl.setopt(pycurl.WRITEFUNCTION, lambda x: None) #Do not store actual response body at all.\n\n #Benchmark warm-up to allow for caching, JIT compiling, on client\n logging.info('Warmup: ' + message + ' started')\n for x in xrange(0, warmup_runs):\n if benchmark.method == u'POST' or benchmark.method == u'PUT':\n curl.setopt(curl.READFUNCTION, StringIO.StringIO(benchmark.body).read)\n curl.perform()\n logging.info('Warmup: ' + message + ' finished')\n\n logging.info('Benchmark: ' + message + ' starting')\n\n for x in xrange(0, benchmark_runs): # Run the actual benchmarks\n if benchmark.method == u'POST' or benchmark.method == u'PUT':\n curl.setopt(curl.READFUNCTION, StringIO.StringIO(benchmark.body).read)\n\n try: # Run the curl call, if it errors, then add to failure counts for benchmark\n curl.perform()\n except Exception:\n output.failures = output.failures + 1\n continue # Skip metrics collection\n\n # Get all metrics values for this run, and store to metric lists\n for i in xrange(0, len(metricnames)):\n results[i].append( curl.getinfo(metricvalues[i]) )\n\n logging.info('Benchmark: ' + message + ' ending')\n\n temp_results = dict()\n for i in xrange(0, len(metricnames)):\n temp_results[metricnames[i]] = results[i]\n output.results = temp_results\n\n curl.close()\n return analyze_benchmark_results(output, benchmark)", "def 
main(ctx: click.Context):\n click.secho(\"MySQL Benchmark\", bold=True)\n results = []\n with click.progressbar(range(ctx.obj[\"count\"])) as bar:\n for number in bar:\n response = requests.get(url=f'{ctx.obj[\"hostname\"]}/api/mysql.php')\n if response.status_code != 200:\n raise click.ClickException(\n f'{ctx.obj[\"hostname\"]}/api/mysql.php Not Found!'\n )\n\n response = requests.get(url=f'{ctx.obj[\"hostname\"]}/api/mysql.php')\n response.raise_for_status()\n results.append(\n BenchmarkResult(\n timestamp=time.time(), number=number, data=response.json()\n )\n )\n time.sleep(ctx.obj[\"sleep\"])\n\n insert_timings = get_timings(results, \"insert\")\n insert_single_transaction_timings = get_timings(\n results, \"insertSingleTransaction\"\n )\n result = {\n \"results\": results,\n \"timings\": {\n \"insert\": calculate_timing_stats(insert_timings),\n \"insert_single_transaction\": calculate_timing_stats(\n insert_single_transaction_timings\n ),\n },\n }\n table = render_table(result)\n click.echo(table)", "def benchmark(options):\n # Prepare experiments\n with open(options['<benchmark>']) as f:\n benchmark_config = json.loads(f.read())\n generate_agent_configs(benchmark_config)\n experiments = product(benchmark_config['environments'], benchmark_config['agents'], [options])\n\n # Run evaluations\n with Pool(processes=int(options['--processes'])) as pool:\n results = pool.starmap(evaluate, experiments)\n\n # Clean temporary config files\n generate_agent_configs(benchmark_config, clean=True)\n\n # Write evaluations summary\n benchmark_filename = os.path.join(Evaluation.OUTPUT_FOLDER, '{}_{}.{}.json'.format(\n BENCHMARK_FILE, datetime.datetime.now().strftime('%Y%m%d-%H%M%S'), os.getpid()))\n with open(benchmark_filename, 'w') as f:\n json.dump(results, f, sort_keys=True, indent=4)\n gym.logger.info('Benchmark done. 
Summary written in: {}'.format(benchmark_filename))", "def run_report(self) -> None:\n t1 = self.t1 or time.time()\n\n dt = t1 - self.t0\n\n if dt and self.max_tasks:\n speed = len(self.statistics) / dt / self.max_tasks\n else:\n speed = 0\n\n LOGGER.info('CRAWLER STATISTICS REPORT')\n\n show = list(self.statistics)\n show.sort(key=lambda stat: str(stat.url))\n\n for stat in show:\n self.log_url_metadata(stat)\n\n LOGGER.info(\n f'Completed parsing {len(self.statistics)} urls in {dt} secs; (max_tasks={self.max_tasks}) ({speed} urls per second per task)', # pylint: disable=C0301 # noqa: E501\n )\n\n LOGGER.info(f'Remaining: {self.queue.qsize()}')\n LOGGER.info(f'Total Statistics: {len(self.statistics)}')\n LOGGER.info(f'Datetime: {time.ctime()} local time')", "def Run(benchmark_spec):\n vms = benchmark_spec.vms\n results = []\n\n logging.info('Iperf Results:')\n\n # Send traffic in both directions\n for sending_vm, receiving_vm in vms, reversed(vms):\n # Send using external IP addresses\n if vm_util.ShouldRunOnExternalIpAddress():\n results.append(_RunIperf(sending_vm,\n receiving_vm,\n receiving_vm.ip_address,\n 'external'))\n\n # Send using internal IP addresses\n if vm_util.ShouldRunOnInternalIpAddress(sending_vm,\n receiving_vm):\n results.append(_RunIperf(sending_vm,\n receiving_vm,\n receiving_vm.internal_ip,\n 'internal'))\n\n return results", "def run_benchmark(self, test_config, instance, copy=0):\n # Timestamp and other values added for reporting\n result_dir = self.results_directory(test_config)\n test_config['timestamp'] = int(time.time())\n test_config['workspace'] = self.workspace\n cmd = self._cmd_builder(test_config)\n test_config['cmd'] = cmd\n total_batches = test_config['total_batches']\n\n test_home = os.path.join(self.bench_home, test_config['cmd_path'])\n\n # Write config to results folder\n config_file_out = os.path.join(result_dir, 'config.yaml')\n config_out = open(config_file_out, 'w')\n config_out.write(yaml.dump(test_config))\n config_out.close()\n\n # TODO(tobyboyd@): No longer distributed remove threads.\n worker_threads = []\n i = 0\n cmd = 'cd {}; {}'.format(test_home, cmd)\n print('[{}] worker | Run benchmark({}):{}'.format(\n copy, test_config['test_id'], cmd))\n stdout_file = os.path.join(result_dir, 'worker_%d_stdout.log' % i)\n stderr_file = os.path.join(result_dir, 'worker_%d_stderr.log' % i)\n t = instance.ExecuteCommandInThread(\n cmd, stdout_file, stderr_file, print_error=True)\n worker_threads.append(t)\n\n # Wait for log file to appear\n wait_time = 0\n while t.is_alive() and not os.path.isfile(stdout_file):\n print('Waiting for log file. Waited for {} seconds.'.format(wait_time))\n time.sleep(2)\n wait_time += 2\n\n # TODO(tobyboyd@) fix fragile check for batch to stop on.\n # Example: Epoch: [0][130/40037] Time 0.397\n batch_killer = '{}/'.format(total_batches)\n while t.is_alive():\n with open(stdout_file, 'r') as log:\n for line in log:\n if batch_killer in line:\n print('{} batches complete. 
Kill Thread.'.format(batch_killer))\n instance.kill_processes()\n break\n time.sleep(5)\n\n for t in worker_threads:\n t.join()\n\n return result_dir", "def run_tests(self):\n with self.report.timer.record(\"run\"):\n self.result.report.extend(self._run_tests())", "def run_benchmark():\n import argparse\n parser = argparse.ArgumentParser(description='Benchmark alchemically modified system against unmodified system.')\n parser.add_argument('--platform', dest='platform_name', action='store', default=None, help='platform name to benchmark (default: None)')\n options = parser.parse_args()\n\n from sams.tests import testsystems\n for testsystem_name in ['AblImatinibExplicitAlchemical']:\n cls = getattr(testsystems, testsystem_name)\n testsystem = cls()\n factory_args = { 'ligand_atoms' : testsystem.alchemical_atoms, 'receptor_atoms' : range(0,4266) }\n benchmark(testsystem.system, testsystem.positions, platform_name=options.platform_name, nsteps=5000, timestep=1.0*unit.femtoseconds, factory_args=factory_args)", "def test_benchmark(self):\n\n proc = subprocess.Popen([\n sys.executable,\n benchmark.__file__,\n self.live_server_ws_url,\n ])\n for _ in range(0, 90, 5):\n time.sleep(5)\n if proc.returncode:\n break\n else:\n proc.terminate()\n proc.wait()\n assert proc.returncode == 0", "def main():\n logging.basicConfig(level=\"INFO\")\n assert len(sys.argv) == 2, \"Exactly one positional argument (path to the raw dataset) is \"\\\n \"needed. \\n\\nE.g. `python sparsity_benchmark ~/bff_data/final_table`\"\n\n # Prepares data for the benchmark, may take a while\n data_parameters = DATA_PARAMETERS.copy()\n data_parameters[\"input_file\"] = sys.argv[1]\n data_parameters[\"preprocessed_file\"] = os.path.join(\n os.path.dirname(data_parameters[\"input_file\"]),\n \"preprocessed_dataset.pkl\"\n )\n data_preprocessor = preprocess_dataset(data_parameters=data_parameters)\n\n # Note: the features here should be in range [0, ~1.2], according to the original experiments.\n # 0 corresponds to no data, everything else is linearly scaled from dB units.\n features, _ = data_preprocessor.load_dataset()\n\n logging.info(\"Starting benchmarks\")\n noisy_features = benchmark_noise(\n features=features,\n data_parameters=data_parameters,\n experiment_parameters=EXPERIMENT_PARAMETERS\n )\n benchmark_binarization(\n noisy_features=noisy_features,\n data_parameters=data_parameters,\n experiment_parameters=EXPERIMENT_PARAMETERS\n )\n logging.info(\"Done\")", "def _run():\n subprocess.check_call(\n [\n \"tools/bazel\",\n \"build\",\n \"-c\",\n \"opt\",\n \"test/core/memory_usage/memory_usage_test\",\n ]\n )\n ret = {}\n for name, benchmark_args in _BENCHMARKS.items():\n for scenario, extra_args in _SCENARIOS.items():\n # TODO(chenancy) Remove when minstack is implemented for channel\n if name == \"channel\" and scenario == \"minstack\":\n continue\n try:\n output = subprocess.check_output(\n [\n \"bazel-bin/test/core/memory_usage/memory_usage_test\",\n ]\n + benchmark_args\n + extra_args\n )\n except subprocess.CalledProcessError as e:\n print(\"Error running benchmark:\", e)\n continue\n for line in output.splitlines():\n for key, (pattern, conversion) in _INTERESTING.items():\n m = re.match(pattern, line)\n if m:\n ret[scenario + \": \" + key] = conversion(m.group(1))\n return ret", "def _run_benchmark(shell, shell_args, name, app, duration_seconds, measurements,\n verbose, android, save_traces):\n timeout = duration_seconds + _EXTRA_TIMEOUT\n benchmark_args = []\n benchmark_args.append('--app=' + app)\n 
benchmark_args.append('--duration=' + str(duration_seconds))\n\n output_file = None\n device_output_file = None\n if save_traces:\n output_file = 'benchmark-%s-%s.trace' % (name.replace(' ', '_'),\n time.strftime('%Y%m%d%H%M%S'))\n if android:\n device_output_file = os.path.join(shell.get_tmp_dir_path(), output_file)\n benchmark_args.append('--trace-output=' + device_output_file)\n else:\n benchmark_args.append('--trace-output=' + output_file)\n\n for measurement in measurements:\n benchmark_args.append(measurement)\n\n shell_args = list(shell_args)\n shell_args.append(_BENCHMARK_APP)\n shell_args.append('--force-offline-by-default')\n shell_args.append('--args-for=%s %s' % (_BENCHMARK_APP,\n ' '.join(benchmark_args)))\n\n if verbose:\n print 'shell arguments: ' + str(shell_args)\n return_code, output, did_time_out = shell.run_and_get_output(\n shell_args, timeout=timeout)\n\n if did_time_out:\n return False, 'timed out', output\n if return_code:\n return False, 'return code: ' + str(return_code), output\n\n # Pull the trace file even if some measurements are missing, as it can be\n # useful in debugging.\n if device_output_file:\n shell.pull_file(device_output_file, output_file, remove_original=True)\n\n return True, None, output", "def RunBenchmark(path_to_apk, run_label):\n # `path_to_apk` is similar to `./out/59.0.3071.132_arm_MonochromeStable.apk`\n chrome_version = ChromeVersion(path_to_apk.split('/')[-1].split('_')[0])\n subprocess.call(['adb', 'install', '-r', '-d', path_to_apk])\n subprocess.call([os.path.join(utils.CHROMIUM_SRC, 'tools',\n 'perf', 'run_benchmark'),\n '--browser=android-system-chrome',\n '--pageset-repeat=1', # could remove this later\n '--results-label=%s' % str(chrome_version),\n # TODO(wangge):not sure if we should run in compatibility\n # mode even for the later version, probably add a check in\n # caller to determine if we should run it in compatibility\n # mode and add an argument `run_in_compatibility_mode` to\n # the `RunBenchmark` function\n '--compatibility-mode=no-field-trials',\n '--compatibility-mode=ignore-certificate-errors',\n '--compatibility-mode=legacy-command-line-path',\n '--compatibility-mode=gpu-benchmarking-fallbacks',\n '--story-filter=wikipedia', # could remove this\n # thinking of adding an argument to the tool to set this\n '--output-dir=%s' % os.path.join(\n utils.APP_ROOT, 'results', run_label,\n str(chrome_version.milestone)),\n # thinking of adding an argument to the tool to set this too\n 'system_health.memory_mobile'])", "def run(self):\n self.speed_test.start()", "def main(benchmark, size=None, backend=None, repetitions=None, burnin=1, device=\"cpu\"):\n try:\n bm_module, bm_identifier = get_benchmark_module(benchmark)\n except ImportError as e:\n click.echo(f\"Error while loading benchmark {benchmark}: {e!s}\", err=True)\n raise click.Abort()\n\n available_backends = set(bm_module.__implementations__)\n\n if len(backend) == 0:\n backend = available_backends.copy()\n else:\n backend = set(backend)\n\n unsupported_backends = [b for b in backend if b not in available_backends]\n\n for b in unsupported_backends:\n click.echo(\n f'Backend \"{b}\" is not supported by chosen benchmark (skipping)', err=True\n )\n backend.remove(b)\n\n for b in backend.copy():\n try:\n with setup_functions[b](device=device) as bmod:\n click.echo(f\"Using {b} version {bmod.__version__}\")\n except BackendNotSupported as e:\n click.echo(\n f'Setup for backend \"{b}\" failed (skipping), reason: {e!s}', err=True\n )\n backend.remove(b)\n\n try:\n 
check_backend_conflicts(backend, device)\n except BackendConflict as exc:\n click.echo(f\"Backend conflict: {exc!s}\", err=True)\n raise click.Abort()\n\n runs = sorted(itertools.product(backend, size))\n\n if len(runs) == 0:\n click.echo(\"Nothing to do\")\n return\n\n timings = {run: [] for run in runs}\n\n if repetitions is None:\n click.echo(\"Estimating repetitions...\")\n repetitions = {}\n\n for b, s in runs:\n # use end-to-end runtime for repetition estimation\n def run_func():\n run = bm_module.get_callable(b, s, device=device)\n with setup_functions[b](device=device):\n run()\n\n repetitions[(b, s)] = estimate_repetitions(run_func)\n else:\n repetitions = {(b, s): repetitions for b, s in runs}\n\n all_runs = list(\n itertools.chain.from_iterable(\n [run] * (repetitions[run] + burnin) for run in runs\n )\n )\n random.shuffle(all_runs)\n\n results = {}\n checked = {r: False for r in runs}\n\n pbar = click.progressbar(\n label=f\"Running {len(all_runs)} benchmarks...\", length=len(runs)\n )\n\n try:\n with pbar:\n for (b, size) in all_runs:\n with setup_functions[b](device=device):\n run = bm_module.get_callable(b, size, device=device)\n with Timer() as t:\n res = run()\n\n # YOWO (you only warn once)\n if not checked[(b, size)]:\n if size in results:\n is_consistent = check_consistency(\n results[size], convert_to_numpy(res, b, device)\n )\n if not is_consistent:\n click.echo(\n f\"\\nWarning: inconsistent results for size {size}\",\n err=True,\n )\n else:\n results[size] = convert_to_numpy(res, b, device)\n checked[(b, size)] = True\n\n timings[(b, size)].append(t.elapsed)\n pbar.update(1.0 / (repetitions[(b, size)] + burnin))\n\n # push pbar to 100%\n pbar.update(1.0)\n\n for run in runs:\n assert len(timings[run]) == repetitions[run] + burnin\n\n finally:\n stats = compute_statistics(timings)\n click.echo(format_output(stats, bm_identifier, device=device))", "def run_benchmark(env: Env, in_file):\n\n print('Running benchmarks in', in_file.name)\n # Run file_path through mlir_to_bef and bef_executor and extract the\n # benchmark result.\n return env.run_mlir(in_file.read())", "def Stop():\n\n if global_options.loglevel >= 1 and global_benchmark:\n t = time.time() - global_starting_time\n global_options.stdlog.write(\n \"######### Time spent in benchmarked functions #########\\n\")\n global_options.stdlog.write(\"# function\\tseconds\\tpercent\\n\")\n for key, value in global_benchmark.items():\n global_options.stdlog.write(\n \"# %s\\t%6i\\t%5.2f%%\\n\" % (key, value,\n (100.0 * float(value) / t)))\n global_options.stdlog.write(\n \"#######################################################\\n\")\n\n if global_options.loglevel >= 1:\n global_options.stdlog.write(getFooter() + \"\\n\")\n\n # close files\n if global_options.stdout != sys.stdout:\n global_options.stdout.close()\n # do not close log, otherwise error occurs in atext.py\n # if global_options.stdlog != sys.stdout:\n # global_options.stdlog.close()\n\n if global_options.stderr != sys.stderr:\n global_options.stderr.close()\n\n if global_options.timeit_file:\n\n outfile = open(global_options.timeit_file, \"a\")\n\n if global_options.timeit_header:\n outfile.write(\"\\t\".join(\n (\"name\", \"wall\", \"user\", \"sys\", \"cuser\", \"csys\",\n \"host\", \"system\", \"release\", \"machine\",\n \"start\", \"end\", \"path\", \"cmd\")) + \"\\n\")\n\n csystem, host, release, version, machine = map(str, os.uname())\n uusr, usys, c_usr, c_sys = map(lambda x: \"%5.2f\" % x, os.times()[:4])\n t_end = time.time()\n c_wall = \"%5.2f\" % 
(t_end - global_starting_time)\n\n if sys.argv[0] == \"run.py\":\n cmd = global_args[0]\n if len(global_args) > 1:\n cmd += \" '\" + \"' '\".join(global_args[1:]) + \"'\"\n else:\n cmd = sys.argv[0]\n\n result = \"\\t\".join((global_options.timeit_name,\n c_wall, uusr, usys, c_usr, c_sys,\n host, csystem, release, machine,\n time.asctime(time.localtime(global_starting_time)),\n time.asctime(time.localtime(t_end)),\n os.path.abspath(os.getcwd()),\n cmd)) + \"\\n\"\n\n outfile.write(result)\n outfile.close()", "def benchmark_profile(self):\n cb_bin = os.path.join(bin_path, 'compilebench')\n desc = \"benchmark\"\n test_name = \"compilebench_{0}\".format(to_safe_name(desc))\n test = TestProfile(\n name=test_name,\n desc=desc,\n test_path=self.test_path,\n bin_path=bin_path,\n command=\"{0} -D {1} -i 10 --makej\".format(cb_bin, self.test_path))\n\n return test", "def measure(self):\n # --- perform repeated runs\n for i_run in range(self.n_runs):\n if self.verbosity > 0:\n print(\"Run {0} / {1} ...\".format(i_run, self.n_runs), end = '')\n tdelta = self._timed_execute()\n self._run_times[i_run] = tdelta\n\t\t\t\n if self.verbosity == 2:\n print(tdelta)\n \n # calculate mean\n self._tmean = np.mean(self._run_times)\n # calculate standard deviation\n self._tstdev = np.std(self._run_times)\n # allow access to results\n self.__hasrun = True", "def main():\n logging.info(\"Testing iOS application performance metrics: application size, launch duration and RAM memory usage!\")\n\n try:\n args = parse_args()\n\n TEST_RESULTS = run_tests(args)\n test_summary = create_test_summary(args, TEST_RESULTS)\n write_results_to_file(TEST_RESULTS, RESULTS_FILE, test_summary, SUMMARY_FILE)\n report_tests(args, test_summary)\n\n except Exception as e:\n logging.error(\"Testing performance of application failed with error '{ERROR}'\".format(ERROR=e))", "def main():\n known_args, unknown_args = parse_known_args()\n if not unknown_args:\n # return an error message if no command is provided\n sys.exit(\"Please provide a command to benchmark: $ humann_benchmark COMMAND\")\n try:\n process = subprocess.Popen(\" \".join(unknown_args),shell=True)\n except (EnvironmentError, subprocess.CalledProcessError):\n sys.exit(\"Unable to execute command: \" + \" \".join(unknown_args))\n pid=str(process.pid)\n start=time.time()\n max_memory=0\n while process.poll() is None:\n time.sleep(1)\n # while the process is running check on the memory use\n # get the pids of the main process and all children (and their children)\n pids=get_pids(pid)\n stdout=subprocess.check_output([\"ps\",\"--pid\",\",\".join(pids),\"-o\",\"pid,rss,command\"]).decode(\"utf-8\")\n print(\"\\n\"+stdout+\"\\n\")\n # remove the header from the process output\n status=[i.split() for i in filter(lambda x: x, stdout.split(\"\\n\")[1:])]\n # memory is the sum of all rss\n memory=sum(int(i[1]) for i in status)\n if memory > max_memory:\n max_memory=memory\n \n end=time.time()\n print(\"Time: {:.0f} minutes\".format((end-start)/60))\n print(\"Max Memory (RSS): {:.1f} GB\".format(max_memory*1.0/1024**2))", "def run(self, train=True):\n\n # generate synthetic measurements\n start = time()\n self.data = self.measure()\n\n # build graphs\n self.graphs = self.build_graphs()\n\n # train annotation object\n if train and self.train_globally:\n self.annotator = self.train(*list(self.graphs.values()),\n attribute=self.attribute,\n **self.training_kw)\n\n elif not self.train_globally:\n self.annotator = None\n\n # evaluate benchmarks\n self.results = self.evaluate_benchmarks()\n 
self.runtime = time() - start", "def execute(args, suite, benchmark, num_iters):\n\n p = Popen(args, stderr=PIPE, stdout=PIPE)\n stdout, stderr = p.communicate()\n stdout, stderr = stdout.decode(), stderr.decode()\n\n return DoneExec(suite, benchmark, args, num_iters, p.returncode,\n stdout, stderr)", "def run(self):\n self.run_measurement()\n self.run_analysis()\n if self.get_param_value('update'):\n self.run_update()", "def run(self):\n logging.info('running experiment...')\n self._prepare()\n self._load_data()\n self._run()\n self._evaluate()\n self._summarise()\n return True", "def Run(benchmark_spec):\n _UpdateBenchmarkSpecWithFlags(benchmark_spec)\n vm = benchmark_spec.vms[0]\n if benchmark_spec.tpus:\n # For MLPerf 1.0, the benchmake code of different hardware are different.\n if (benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-32' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-128' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-256' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-512' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-1024' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-2048'):\n run_path = (\n '$HOME/training_results_{version}/Google/benchmarks/{model}/tpu-{tpus}'\n .format(\n version=VERSION.value,\n model=benchmark_spec.benchmark,\n tpus=benchmark_spec.tpu_groups['train'].GetAcceleratorType()))\n code_path = (\n '$HOME/training_results_{version}/Google/benchmarks/{model}/implementations/tpu-{tpus}-{model}'\n .format(\n version=VERSION.value,\n model=benchmark_spec.benchmark,\n tpus=benchmark_spec.tpu_groups['train'].GetAcceleratorType()))\n\n if MASK in benchmark_spec.benchmark:\n model = 'mask_rcnn'\n elif GNMT in benchmark_spec.benchmark:\n model = 'nmt'\n else:\n model = benchmark_spec.benchmark\n\n mlperf_benchmark_cmd = (\n 'cd {code_path} && '\n 'export PYTHONPATH=$(pwd):$(pwd)/{model} && '\n 'cd {model} && '\n '{run_path}/run_and_time.sh'.format(\n code_path=code_path,\n model=model,\n run_path=run_path))\n\n if SSD in benchmark_spec.benchmark:\n mlperf_benchmark_cmd = (\n 'export '\n 'MLP_GCS_RESNET_CHECKPOINT={checkpoint}'\n ' && {cmd}'.format(\n checkpoint=FLAGS.mlperf_gcs_resnet_checkpoint,\n cmd=mlperf_benchmark_cmd))\n else:\n raise ValueError(\n 'MLPerf configurations do not support the hardware in PKB. 
PKB may '\n 'need to be updated if this is a new TPU type.')\n\n else:\n run_sub_paths = {RESNET: 'resnet/implementations/mxnet',\n TRANSFORMER: 'transformer/implementations/pytorch',\n MINIGO: 'minigo/implementations/tensorflow',\n MASK: 'maskrcnn/implementations/pytorch',\n GNMT: 'gnmt/implementations/pytorch',\n SSD: 'ssd/implementations/pytorch',\n BERT: 'bert/implementations/pytorch',}\n benchmark_path = f'$HOME/training_results_{VERSION.value}/NVIDIA/benchmarks'\n run_path = posixpath.join(benchmark_path,\n run_sub_paths[benchmark_spec.benchmark])\n env = {\n 'DGXSYSTEM': DGXSYSTEM,\n 'NEXP': 1,\n 'PULL': 0,\n 'LOGDIR': f'/tmp/{benchmark_spec.benchmark}',\n }\n envs = {\n RESNET: {},\n TRANSFORMER: {'DATADIR': '/data/wmt/utf8'},\n MINIGO: {'CONT': 'mlperf-nvidia:minigo'},\n MASK: {},\n GNMT: {'DATADIR': '/data/gnmt'},\n SSD: {'DATADIR': '/data'},\n BERT: {}\n }\n env.update(envs[benchmark_spec.benchmark])\n\n run_script = posixpath.join(run_path, 'run_with_docker.sh')\n vm_util.ReplaceText(vm, 'SYSLOGGING=1', 'SYSLOGGING=0', run_script)\n vm_util.ReplaceText(vm, 'docker exec -it', 'docker exec -t', run_script)\n if benchmark_spec.benchmark == RESNET:\n vm_util.ReplaceText(vm, r'mpirun.*run_and_time\\.sh',\n r'.\\/run_and_time.sh', run_script)\n\n env = ' '.join(f'{key}={value}' for key, value in env.items())\n if nvidia_driver.CheckNvidiaGpuExists(vm):\n env = f'{tensorflow.GetEnvironmentVars(vm)} {env}'\n\n mlperf_benchmark_cmd = (\n f'chmod 755 {run_script} && '\n f'cd {run_path} && '\n f'{env} {run_script}')\n\n samples = []\n metadata = _CreateMetadataDict(benchmark_spec)\n stdout, _ = vm.RobustRemoteCommand(mlperf_benchmark_cmd)\n if NONE in FLAGS.mlperf_profiler:\n samples.extend(\n MakeSamplesFromOutput(\n metadata,\n stdout,\n use_tpu=bool(benchmark_spec.tpus),\n model=benchmark_spec.benchmark))\n return samples", "def main():\n parser = optparse.OptionParser()\n parser.add_option('--debug', action='store_true', default=False,\n help='run in debug mode')\n parser.add_option('-i', '--iteration', type=int, default=DEFAULT_ITERATION,\n metavar='NUM',\n help='set the number of iterations for each test (defualt:%d)' % \\\n DEFAULT_ITERATION)\n parser.add_option('-f', '--fstypes', default='ext2,ext3,ext4,btrfs,xfs',\n type='string', metavar='TYPES', help='set the file systems to test')\n parser.add_option('-n', '--num', default=10000, type=int, metavar='NUM',\n help='set the number of file created')\n parser.add_option('-N', '--numa', action='store_true', default=False,\n help='run NUMA test')\n parser.add_option('-S', '--scalability', action='store_true', default=False,\n help='run scalability test')\n global options\n options, args = parser.parse_args()\n\n benchutils.check_root_or_die()\n suffix = ''\n if options.numa:\n suffix = 'numa'\n else:\n suffix = 'scale'\n output_dir = benchutils.get_output_directory(suffix=suffix, timestamp=True)\n fstypes = options.fstypes.split(',')\n for fs in fstypes:\n if options.numa:\n run_tests(output_dir, fs)\n elif options.scalability:\n run_scalability_tests(output_dir, fs)", "def main():\n configuration = {'resource-folder': 'resources',\n 'build-folder': 'build',\n 'log-folder': 'logfiles',\n 'use-preloaded': False,\n 'addi-metrics': 'addi-metrics.json',\n 'jenkins': {'dependency-filename': 'dependencies.txt',\n 'server': 'http://is.dbc.dk',\n 'repository-project': 'opensearch-3rd-party-dependencies'},\n 'log-zip-file':'logs.zip'}\n configuration.update(cli())\n setup_logger(configuration['verbose'])\n run_performance_test(configuration)", 
"def Run(benchmark_spec):\n cluster = benchmark_spec.dpb_service\n storage_service = cluster.storage_service\n metadata = benchmark_spec.dpb_service.GetMetadata()\n\n metadata['benchmark'] = BENCHMARK_NAMES[FLAGS.dpb_sparksql_query]\n\n # Run PySpark Spark SQL Runner\n report_dir = '/'.join([cluster.base_dir, f'report-{int(time.time()*1000)}'])\n args = [\n '--sql-scripts',\n ','.join(benchmark_spec.staged_queries),\n '--report-dir',\n report_dir,\n ]\n if FLAGS.dpb_sparksql_database:\n args += ['--database', FLAGS.dpb_sparksql_database]\n table_metadata = _GetTableMetadata(benchmark_spec)\n if table_metadata:\n table_metadata_file = '/'.join([cluster.base_dir, 'metadata.json'])\n _StageMetadata(table_metadata, storage_service, table_metadata_file)\n args += ['--table-metadata', table_metadata_file]\n else:\n # If we don't pass in tables, we must be reading from hive.\n # Note you can even read from Hive without --create_hive_tables if they\n # were precreated.\n args += ['--enable-hive', 'True']\n if FLAGS.dpb_sparksql_table_cache:\n args += ['--table-cache', FLAGS.dpb_sparksql_table_cache]\n if FLAGS.dpb_sparksql_simultaneous:\n args += ['--simultaneous', 'True']\n jars = []\n if FLAGS.spark_bigquery_connector:\n jars.append(FLAGS.spark_bigquery_connector)\n job_result = cluster.SubmitJob(\n pyspark_file='/'.join([cluster.base_dir, SPARK_SQL_RUNNER_SCRIPT]),\n job_arguments=args,\n job_jars=jars,\n job_type=dpb_service.BaseDpbService.PYSPARK_JOB_TYPE)\n\n # Spark can only write data to directories not files. So do a recursive copy\n # of that directory and then search it for the single JSON file with the\n # results.\n temp_run_dir = temp_dir.GetRunDirPath()\n storage_service.Copy(report_dir, temp_run_dir, recursive=True)\n report_file = None\n for dir_name, _, files in os.walk(\n os.path.join(temp_run_dir, os.path.basename(report_dir))):\n for filename in files:\n if filename.endswith('.json'):\n report_file = os.path.join(dir_name, filename)\n logging.info(report_file)\n if not report_file:\n raise errors.Benchmarks.RunError('Job report not found.')\n\n results = []\n run_times = {}\n passing_queries = set()\n with open(report_file, 'r') as file:\n for line in file:\n result = json.loads(line)\n logging.info('Timing: %s', result)\n query_id = _GetQueryId(result['script'])\n assert query_id\n passing_queries.add(query_id)\n metadata_copy = metadata.copy()\n metadata_copy['query'] = query_id\n results.append(\n sample.Sample('sparksql_run_time', result['duration'], 'seconds',\n metadata_copy))\n run_times[query_id] = result['duration']\n\n metadata['failing_queries'] = ','.join(\n sorted(set(FLAGS.dpb_sparksql_order) - passing_queries))\n\n results.append(\n sample.Sample('sparksql_total_wall_time', job_result.wall_time, 'seconds',\n metadata))\n results.append(\n sample.Sample('sparksql_geomean_run_time',\n sample.GeoMean(run_times.values()), 'seconds', metadata))\n cluster_create_time = cluster.GetClusterCreateTime()\n if cluster_create_time is not None:\n results.append(\n sample.Sample('dpb_cluster_create_time', cluster_create_time, 'seconds',\n metadata))\n return results", "def main(args):\n\n # Compose the model list\n modellist = []\n if args['model']:\n modellist.append(bmark.ModelInfo(args['model'], os.getcwd(), args['classname']))\n\n # Load the benchmark settings\n benchmark = None\n benchmark = bmark.load_benchmark(args['benchmark'])\n corresponding_data = False\n if 'corresponding_data' in benchmark:\n corresponding_data = benchmark['corresponding_data']\n\n # Only extend 
if not cached\n cache_df = None\n if not args['cache']:\n modellist.extend(benchmark['models'])\n else:\n cache_df = pd.read_csv(args['cache'])\n\n # Extract comparator settings from benchmark description\n eval_comparator = comparator.EqualityComparator()\n if 'comparator' in benchmark:\n if benchmark['comparator'] == 'nvc':\n eval_comparator = comparator.NVCComparator()\n\n # Run the model evaluation\n is_silent = (args['output'] in ['html', 'server'])\n eva = None\n if benchmark['type'] == 'adaption':\n eva = evaluator.AdaptionEvaluator(\n modellist,\n eval_comparator,\n benchmark['data.test'],\n train_datafile=benchmark['data.train'],\n train_data_person=benchmark['data.train_person'],\n silent=is_silent,\n corresponding_data=corresponding_data,\n domain_encoders=benchmark['domain_encoders'],\n cache_df=cache_df\n )\n elif benchmark['type'] == 'coverage':\n # Check for benchmark validity\n if benchmark['data.train'] or benchmark['data.train_person']:\n print('WARNING: Ignoring specified training and train_person data ' \\\n + 'for coverage evaluation...')\n\n eva = evaluator.CoverageEvaluator(\n modellist,\n eval_comparator,\n benchmark['data.test'],\n train_datafile=benchmark['data.train'],\n train_data_person=benchmark['data.train_person'],\n silent=is_silent,\n corresponding_data=corresponding_data,\n domain_encoders=benchmark['domain_encoders'],\n cache_df=cache_df\n )\n else:\n raise ValueError('Unknown benchmark type: {}'.format(benchmark['type']))\n\n with silence_stdout(is_silent):\n res_df = eva.evaluate()\n\n if 'save' in args:\n res_df.to_csv(args['save'], index=False)\n\n # Run the metric visualizer\n htmlcrtr = html_creator.HTMLCreator([\n viz_plot.AccuracyVisualizer(),\n viz_plot.BoxplotVisualizer(),\n viz_plot.TableVisualizer()\n ])\n\n # Prepare the benchmark output information and visualize the evaluation results\n benchmark_info = {\n 'name': os.path.basename(args['benchmark']),\n 'data.train': os.path.basename(\n benchmark['data.train']) if benchmark['data.train'] else '',\n 'data.train_person': os.path.basename(\n benchmark['data.train_person']) if benchmark['data.train_person'] else '',\n 'data.test': os.path.basename(benchmark['data.test']),\n 'type': benchmark['type'],\n 'corresponding_data': benchmark['corresponding_data'],\n 'domains': list(res_df['domain'].unique()),\n 'response_types': list(res_df['response_type'].unique()),\n }\n\n if args['output'] == 'browser':\n html = htmlcrtr.to_html(res_df, benchmark_info, embedded=False)\n server.load_in_default_browser(html.encode('utf8'))\n elif args['output'] == 'server':\n html = htmlcrtr.to_html(res_df, benchmark_info, embedded=True)\n sys.stdout.buffer.write(html.encode('utf-8'))\n elif args['output'] == 'html':\n html = htmlcrtr.to_html(res_df, benchmark_info, embedded=False)\n print(html)", "def print_timings(binary: str, testdata_filename: str):\n\n # Ensure we throw away an integer number of iterations\n assert ((100 - PRECISION_PERCENT) * ITERATIONS) % 200 == 0\n THROW_AWAY_AT_EACH_END = ((100 - PRECISION_PERCENT) * ITERATIONS) // 200\n\n # Do some warmup runs\n for _ in range(WARMUP_RUNS):\n with open(testdata_filename) as testdata:\n subprocess.check_call(binary, stdin=testdata, stdout=subprocess.DEVNULL)\n\n # Do the actual benchmarking runs\n deltas = []\n for _ in range(ITERATIONS):\n with open(testdata_filename) as testdata:\n t0 = time.time()\n subprocess.check_call(binary, stdin=testdata, stdout=subprocess.DEVNULL)\n t1 = time.time()\n dt_seconds = t1 - t0\n deltas.append(dt_seconds)\n\n 
deltas.sort()\n from_ms = deltas[THROW_AWAY_AT_EACH_END] * 1000\n to_ms = deltas[-THROW_AWAY_AT_EACH_END - 1] * 1000\n mid_ms = (from_ms + to_ms) / 2\n spread_ms = to_ms - from_ms\n print(f\"{mid_ms:.1f}ms±{spread_ms:.1f}ms: {binary}\")", "def speed_test(self):\n self.lg.debug('Performing speed test no. {}'.format(self.runs))\n self.st.get_best_server()\n self.st.upload()\n self.st.download()\n up = self.st.results.upload // 1e6\n down = self.st.results.download // 1e6\n timestamp = time.localtime(time.time())\n self.lg.debug('Timestamp: {}'.format(\n time.strftime('%H:%M:%S', timestamp)))\n self.lg.debug(\n 'Upload is {} Mbps'.format(up))\n self.lg.debug(\n 'Download is {} Mbps'.format(down))\n self.results_up.append(up)\n self.results_down.append(down)\n self.results_timestamp.append(timestamp)", "def run(self):\n self.run_measurement()\n self.run_analysis()\n self.results = self.analysis.proc_data_dict['analysis_params_dict']\n if self.get_param_value('update'):\n self.run_update()\n self.dev.update_cancellation_params()\n\n if self.get_param_value('configure_mux_drive'):\n drive_lo_freqs = self.get_param_value('drive_lo_freqs')\n configure_qubit_mux_drive(self.qubits, drive_lo_freqs)", "def _auto_run(args):\n\n # TDH (2020-01-13) For developement testing the following section\n # replicates the functionality of \"standard_analysis.py\" so that\n # json_results can be created and used to create the graph image\n # files.\n import benchmark_postprocessing as bmpp\n file_list = bmpp.get_benchmark_files(args.benchmark_results_dir)\n json_results = bmpp.parse_files(file_list)\n json_results = bmpp.parse_and_add_benchmark_metadata(json_results)\n run_id_list = get_unique_run_ids(json_results)\n\n # TDH (2020-01-13) - Create unqiue reports for each run ID found.\n # Even a single results directory can contain results from multiple\n # run IDs.\n for run_id in run_id_list:\n output_path = os.path.join(\n args.benchmark_results_dir,\n '{}_report'.format(run_id))\n\n # TDH: Thorough attempt to safely create the results directory and\n # provide good error reporting if something went wrong.\n try:\n os.mkdir(output_path)\n except OSError:\n logging.error('Failed to create directory for report at {}'.format(\n output_path))\n create_standard_analysis_report(output_path,\n json_results,\n run_id)", "def testBenchmark(self, sTargetOs, sBenchmark, sMountpoint, oExecutor, dTestSet, \\\n cMsTimeout = 3600000):\n\n dTestSet['FilePath'] = sMountpoint;\n dTestSet['TargetOs'] = sTargetOs;\n\n oTst = None;\n if sBenchmark == 'iozone':\n oTst = IozoneTest(oExecutor, dTestSet);\n elif sBenchmark == 'fio':\n oTst = FioTest(oExecutor, dTestSet); # pylint: disable=R0204\n\n if oTst is not None:\n fRc = oTst.prepare();\n if fRc:\n fRc = oTst.run(cMsTimeout);\n if fRc:\n if self.fReportBenchmarkResults:\n fRc = oTst.reportResult();\n else:\n reporter.testFailure('Running the testcase failed');\n reporter.addLogString(oTst.getErrorReport(), sBenchmark + '.log',\n 'log/release/client', 'Benchmark raw output');\n else:\n reporter.testFailure('Preparing the testcase failed');\n\n oTst.cleanup();\n\n return fRc;", "def run_benchmarks(urls, urlIndices, trial_number):\n path.append(os.path.join(CHROMIUM_SRC, 'tools/perf/'))\n benchmark_path = os.path.join(CHROMIUM_SRC, 'tools/perf/run_benchmark')\n output_path = 'temp'\n trial_key = 'trial{0}'.format(trial_number)\n\n cmd = ('sudo ' + benchmark_path + ' --profiler=trace telemetryBenchmarks.url{0}')\n for i in urlIndices:\n try:\n out, err, returncode = 
get_benchmark_result(cmd.format(i))\n timeout = False\n print 'successfully ran benchmark for url' + str(i)\n except TimeoutError:\n # Benchmark failed\n print 'Benchmark Timeout!'\n out = ''\n returncode = 1\n timeout = True\n\n failed = ['FAILED']\n if returncode != 0 or any(x in out for x in failed) or timeout:\n # If a benchmark fails, remove its corresponding wpr file, and act\n # as if it didn't exist\n # Remove from data/wpr_source\n print 'Benchmark {0} failed'.format(i)\n print 'return code is ' + str(returncode)\n print 'Out:'\n print out\n print 'Err:'\n print err\n urlName = 'url{0}_page_set_000.wpr'.format(i)\n urlpcName = 'url{0}_pc_page_set_000.wpr'.format(i)\n urlFilePath = os.path.join('data/wpr_source',urlName)\n urlpcFilePath = os.path.join('data/wpr_source',urlpcName)\n urlCmd = 'rm -f {0}'.format(urlFilePath)\n urlpcCmd = 'rm -f {0}'.format(urlpcFilePath)\n print 'Removing: {0}, {1}'.format(urlFilePath, urlpcFilePath)\n commands = [\n 'rm -f {0}'.format(urlFilePath),\n 'rm -f {0}'.format(urlpcFilePath)\n ]\n for cmdss in commands:\n p = Popen(cmdss, shell=True)\n p.wait()\n # Skip the rest of this url\n print \"Moving on!\"\n continue\n\n # Parse data\n tmp_path = 'temp/tmp_benchmark_result_json'\n with open(tmp_path, 'rb') as f:\n tmp_json = json.load(f)\n benchmark_results = tmp_json['values']\n commands = [\n 'rm -f ~/page_load_time/telemetry/temp/tmp_benchmark_result_json',\n ]\n for cmds in commands:\n p = Popen(cmds, shell=True)\n p.wait()\n\n output = {urls[i]: {'cold_times': {trial_key: benchmark_results}}}\n output_file = os.path.join(output_path, urlsafe_b64encode(urls[i]))\n output_file += '.' + str(trial_number)\n try:\n with open(output_file, 'w') as f:\n json.dump(output, f)\n except IOError:\n raise IOError('Unable to write to {0}'.format(output_file))\n\n\n ############### Now run for Perfect Cache file ################\n\n try:\n out, err, returncode = \\\n get_benchmark_result(cmd.format(str(i) + '_pc'))\n timeout = False\n print 'successfully ran benchmark for url' + str(i) + '_pc'\n except TimeoutError:\n # Benchmark failed\n print 'Benchmark Timeout!'\n out = ''\n returncode = 1\n timeout = True\n\n failed = ['FAILED']\n if returncode != 0 or any(x in out for x in failed) or timeout:\n # If a benchmark fails, remove its corresponding wpr file, and act\n # as if it didn't exist\n # Remove from data/wpr_source\n\n print 'Benchmark {0}_pc failed'.format(i)\n print 'Out:'\n print out\n print 'Err:'\n print err\n urlName = 'url{0}_page_set_000.wpr'.format(i)\n urlpcName = 'url{0}_pc_page_set_000.wpr'.format(i)\n urlFilePath = os.path.join('data/wpr_source',urlName)\n urlpcFilePath = os.path.join('data/wpr_source',urlpcName)\n urlCmd = 'rm -f {0}'.format(urlFilePath)\n urlpcCmd = 'rm -f {0}'.format(urlpcFilePath)\n print 'Removing: {0}, {1}'.format(urlFilePath, urlpcFilePath)\n commands = [\n 'rm -f {0}'.format(urlFilePath),\n 'rm -f {0}'.format(urlpcFilePath)\n ]\n for cmdss in commands:\n p = Popen(cmdss, shell=True)\n p.wait()\n # Skip the rest of this url\n print \"Moving on!\"\n continue\n\n # Parse data\n tmp_path = 'temp/tmp_benchmark_result_json'\n with open(tmp_path, 'rb') as f:\n tmp_json = json.load(f)\n benchmark_results = tmp_json['values']\n\n commands = [\n 'rm -f ~/page_load_time/telemetry/temp/tmp_benchmark_result_json',\n ]\n for cmds in commands:\n p = Popen(cmds, shell=True)\n p.wait()\n\n output = {urls[i]: {'cold_times': {trial_key: benchmark_results}}}\n output_file = os.path.join(output_path, urlsafe_b64encode(urls[i]))\n 
output_file += '.' + str(trial_number) + '.pc'\n try:\n with open(output_file, 'w') as f:\n json.dump(output, f)\n except IOError:\n raise IOError('Unable to write to {0}'.format(output_file))", "def run_performance_test(configuration):\n if not os.path.exists(configuration['log-folder']):\n os.mkdir(configuration['log-folder'])\n\n stop_stack = CleanupStack.getInstance()\n\n container_pool = ContainerPoolImpl()\n container_pool.cfg = configuration\n suite = container_pool.take(log_folder=configuration['log-folder'])\n stop_stack.addFunction(container_pool.shutdown)\n stop_stack.addFunction(container_pool.release, suite)\n\n addi_ip = suite.get(\"addi-service\").get_ip()\n\n try:\n\n addi_jolokia_url = \"http://%s:8080/jolokia\" % suite.get(\"addi-service\").get_ip()\n\n # Add jobs\n add_all_addi_job(addi_ip, \"performance test job\")\n\n test_executor(configuration['run-time'])\n\n dump_statistics(configuration['addi-metrics'], (addi_jolokia_url, 'AddiService'))\n\n # Automatically generated plot\n performance_plotter.plot_dump_to(\"performance-report-auto\", configuration['addi-metrics'])\n # Plot based on hive-metrics.ini\n performance_plotter.plot('addi-metrics.ini', 'Addi service processing', 'performance-report')\n\n except Exception as err:\n die(\"Caught error during performance test:\\n%s\" % format_traceback(sys.exc_info(), err))\n\n finally:\n stop_stack.callFunctions()\n zip_logfiles(configuration['log-folder'], configuration['log-zip-file'])", "def benchmark(self, **kwargs):\n num_iterations = kwargs.get(\"benchmark_iterations\")\n\n start_time = time.time()\n\n # store how far off we are\n deviations = []\n\n for _ in xrange(num_iterations):\n kwargs[\"roll\"] = decimal.Decimal(random.uniform(\n self.MIN_BENCHMARK_ROLL, self.MAX_BENCHMARK_ROLL))\n kwargs[\"pitch\"] = decimal.Decimal(random.uniform(\n self.MIN_BENCHMARK_PITCH, self.MAX_BENCHMARK_PITCH))\n\n _, deviation = self.find_closest_trajectory(**kwargs)\n deviations.append(deviation)\n\n # calculate results from the benchmarking\n total_time = time.time() - start_time\n average_time = total_time / num_iterations\n average_deviation = sum(deviations) / len(deviations)\n\n print \"AVERAGE TIME: %s AVERAGE DEVIATION: %s\" \\\n % (average_time, average_deviation)", "def bench_report(t1, t2):\n print \"\\n\\n Time taken: {0}\".format(t2 - t1)", "def test_execution_profiling(self):\n self._test_reports_helper({\"--profile-execution\": \"\"}, [\"report.txt\"])", "def run_spec(spec,\n benchmark_hosts,\n result_hosts=None,\n output_fmt=None,\n logfile_info=None,\n logfile_result=None,\n action=None,\n fail_if=None,\n sample_mode='reservoir',\n re_name=None):\n with Logger(output_fmt=output_fmt,\n logfile_info=logfile_info,\n logfile_result=logfile_result) as log:\n do_run_spec(\n spec=spec,\n benchmark_hosts=benchmark_hosts,\n log=log,\n result_hosts=result_hosts,\n action=action,\n fail_if=fail_if,\n sample_mode=sample_mode,\n re_name=re_name\n )", "def _execute(self, model: ExecutableModelSpace) -> Any:\n\n from .space import BenchmarkModelSpace\n if not isinstance(model, BenchmarkModelSpace):\n warnings.warn('It would be better to use BenchmarkModelSpace for benchmarking to avoid '\n 'unnecessary overhead and silent mistakes.')\n if model.sample is None:\n raise ValueError('Model can not be evaluted because it has not been sampled yet.')\n\n return self.evaluate(model.sample)", "def test_BenchmarkSuite_integration_test(\n benchmark_suite: typing.Callable, tempdir: pathlib.Path\n):\n with benchmark_suite() as bs:\n 
bs.ForceOpenCLEnvironment(cldrive_env.OclgrindOpenCLEnvironment())\n observer = MockBenchmarkObserver(stop_after=1)\n\n # `stop_after` raises BenchmarkInterrupt.\n try:\n bs.Run([observer])\n assert False\n except gpgpu.BenchmarkInterrupt:\n pass\n\n assert len(observer.logs) == 1\n assert observer.logs[0].benchmark_name in bs.benchmarks", "def benchmark():\n parser = argparse.ArgumentParser(\n \n description='pyrpipe diagnostic utility\\nGenerate benchmark report.',\n \n usage='''pyrpipe_diagnostic report [<args>] <logfile>\n \n ''') \n parser.add_argument('-o', help='out file \\ndefault: same as input logfile',action=\"store\")\n parser.add_argument('-e', help='report output type: [MD,PDF,HTML] \\ndefault: PDF',default='PDF',action=\"store\")\n parser.add_argument('-v',help='verbose',action=\"store_true\")\n parser.add_argument('-f',help='Filter by programs. Provide a comma-separated list e.g., prefetch,STAR,bowtie2 \\ndefault None')\n parser.add_argument('-t',help='Temporary directory. \\ndefault ./tmp',action=\"store\")\n parser.add_argument('logfile', help='The log file generated by pyrpipe',action=\"store\")\n args = parser.parse_args(sys.argv[2:])\n \n logFile=args.logfile\n envLog=reports.checkEnvLog(logFile) \n #parse args\n vFlag=args.v\n if vFlag:\n print(\"Generating benchmarks\")\n outFile=\"\"\n if args.o is None:\n outFile=pu.get_file_basename(args.logfile)\n else:\n outFile=args.o\n outFile+='.'+args.e\n \n filters=[]\n if args.f is not None:\n filters= args.f.split(',')\n #create temp dir\n tempDir=\"\"\n if args.t is not None:\n tempDir= args.t\n else:\n tempDir=os.path.join(os.getcwd(),\"tmp\")\n #create tmp dir\n if not pu.check_paths_exist(tempDir):\n pu.mkdir(tempDir)\n \n reports.generateBenchmarkReport(logFile,envLog,filters,tempDir,outFile=outFile,verbose=args.v)", "def _report_result(self, class_method_name, oss_report_object, uid,\n output_dir):\n # Upload benchmark ouput\n output_gcs_dir = None\n if not self.config.output_gcs_url_str:\n print('Skipping uploading output. output_gcs_url_str is not set.')\n elif not os.listdir(output_dir):\n print('Skipping uploading output. 
Directory is empty:{}'.\n format(output_dir))\n else:\n output_gcs_dir = '{}/{}/'.format(self.config.output_gcs_url_str, uid)\n utils.upload_to_gcs(output_dir, output_gcs_dir)\n\n extras = {}\n extras['artifacts'] = output_gcs_dir\n\n main_result, results, test_info, system_info = report_utils.build_entry(\n class_method_name,\n total_time=oss_report_object.total_time,\n test_environment=self.config.test_env_str,\n platform=self.config.platform_name_str,\n platform_type=self.config.platform_type_str)\n\n for test_result in oss_report_object.get_results():\n report_utils.add_result(results, test_result)\n\n print('results:{}'.format(results))\n report_utils.report_result(\n main_result,\n results,\n test_info,\n system_info,\n extras=extras,\n project=self.config.project_name_str,\n dev=False)", "def sciml_bench_run(smlb_in: RuntimeIn, smlb_out: RuntimeOut):\n # activate monitor\n # Note: To use smlb_out, you must activate it, passing the rank\n # information initialized by your distributed learning environment;\n # for a non-distributed benchmark, simply pass rank=0, local_rank=0\n # and activate_log_on_host(_device)=False; here we use True for\n # demonstration -- the log on host0 and device0 will be the same as\n # that on console except for some small differences in time\n # measurements.\n smlb_out.activate(rank=0, local_rank=0, activate_log_on_host=True,\n activate_log_on_device=True, console_on_screen=True)\n\n # log top level process\n # Note: Calling begin(), ended() and message() on smlb_out.log means\n # calling these functions on console, host and device; nothing\n # happens when calling these functions on an unactivated logger.\n log = smlb_out.log\n log.begin('Running benchmark MNIST_tf_keras')\n\n # parse input arguments (only batch_size and epochs)\n # Note: Use try_get() to get a benchmark-specific argument safely from\n # smlb_in.bench_args (passed by users via -b).\n with log.subproc('Parsing input arguments'):\n # hyperparameters\n batch_size = smlb_in.bench_args.try_get('batch_size', default=64)\n epochs = smlb_in.bench_args.try_get('epochs', default=2)\n log.message(f'batch_size = {batch_size}')\n log.message(f'epochs = {epochs}')\n\n # create datasets\n with log.subproc('Creating datasets'):\n dataset_dir = smlb_in.dataset_dir\n train_set = create_dataset_mnist(dataset_dir / 'train.hdf5', batch_size)\n test_set = create_dataset_mnist(dataset_dir / 'test.hdf5', batch_size)\n log.message(f'Dataset directory: {dataset_dir}')\n\n # create model\n with log.subproc('Creating CNN model'):\n model = create_model_mnist()\n\n # train model\n log.begin('Training CNN model')\n # fit()\n with log.subproc('Running model.fit()'):\n # stamp model.fit in system monitor\n # Note: smlb_out.system will monitor system usage regularly; use\n # smlb_out.system.stamp_event() to stamp an event in the report\n smlb_out.system.stamp_event('model.fit')\n history = model.fit(train_set, epochs=epochs, batch_size=batch_size,\n validation_data=test_set, verbose=0,\n callbacks=[LogEpochCallback(smlb_out)])\n # save model\n with log.subproc('Saving model weights'):\n weights_file = smlb_in.output_dir / 'model_weights.h5'\n model.save(weights_file)\n log.message(f'Saved to: {weights_file}')\n # save history\n with log.subproc('Saving training history'):\n history_file = smlb_in.output_dir / 'training_history.yml'\n with open(history_file, 'w') as handle:\n yaml.dump(history.history, handle)\n log.message(f'Saved to: {history_file}')\n log.ended('Training CNN model')\n\n # predict\n with 
log.subproc('Making predictions on test set'):\n with h5py.File(dataset_dir / 'test.hdf5', 'r') as h5_file:\n # stamp model.predict in system monitor\n smlb_out.system.stamp_event('model.predict')\n pred = model.predict(np.expand_dims(h5_file['image'][:], -1) / 255)\n correct = np.sum(pred.argmax(axis=1) == h5_file['label'][:])\n log.message(f'{correct} correct predictions for {len(pred)} images '\n f'(accuracy: {correct / len(pred) * 100:.2f}%)')\n\n # end top level\n log.ended('Running benchmark MNIST_tf_keras')", "def process(self, args):\n for benchmark_file in args.benchmark_files:\n self.process_individual_file(benchmark_file)\n self.total_files += 1", "def _Run(self, vm, **kwargs):\n for pv in FLAGS.ycsb_run_parameters:\n param, value = pv.split('=', 1)\n kwargs[param] = value\n command = self._BuildCommand('run', **kwargs)\n # YCSB version greater than 0.7.0 output some of the\n # info we need to stderr. So we have to combine these 2\n # output to get expected results.\n hdr_files_dir = kwargs.get('hdrhistogram.output.path', None)\n if hdr_files_dir:\n vm.RemoteCommand('mkdir -p {0}'.format(hdr_files_dir))\n stdout, stderr = vm.RobustRemoteCommand(command)\n return ycsb_stats.ParseResults(\n str(stderr + stdout),\n self.measurement_type,\n _ERROR_RATE_THRESHOLD.value,\n self.burst_time_offset_sec,\n )", "def run(self):\n if self.all:\n cmd = self.apply_options(self.test_all_cmd)\n self.call_and_exit(cmd)\n else:\n cmds = (self.apply_options(self.unit_test_cmd, (\"coverage\",)),)\n if self.coverage:\n cmds += (self.apply_options(self.coverage_cmd),)\n self.call_in_sequence(cmds)", "def test_runner_full_loop(caplog, dataset):\n caplog.set_level(logging.DEBUG)\n\n session = dataset\n\n start_date = datetime.datetime(2020, 5, 17, 13, 0, 0)\n end_date = datetime.datetime(2020, 5, 17, 13, 0, 5)\n replay_rate = 1 \n \n db_connector_test = DataBaseConnector(session=session, \n table_name='timeseries_dataset', \n time_column='timestamp', \n start_date=start_date,\n end_date=end_date)\n\n test_publisher = ConsolePublisher()\n\n runner = CentralRunner(db_connection=db_connector_test, \n output_system=test_publisher, \n start_time=start_date, \n end_time=end_date,\n replay_rate=replay_rate )\n\n start = time.perf_counter()\n \n runner.run()\n\n end = time.perf_counter()\n\n code_time = end - start\n \n print(code_time)\n \n assert int(code_time) == 4", "def run_speedtest(cls, **kwargs):\n try:\n cls.response(\"Sure! 
wait a second to measure\")\n st = speedtest.Speedtest()\n server_names = []\n st.get_servers(server_names)\n\n downlink_bps = st.download()\n uplink_bps = st.upload()\n ping = st.results.ping\n up_mbps = uplink_bps / 1000000\n down_mbps = downlink_bps / 1000000\n\n cls.response(\"Speedtest results:\\n\"\n \"The ping is: %s ms \\n\"\n \"The upling is: %0.2f Mbps \\n\"\n \"The downling is: %0.2f Mbps\" % (ping, up_mbps, down_mbps)\n )\n\n except Exception as e:\n cls.response(\"I coudn't run a speedtest\")\n logging.error(\"Speedtest error with message: {0}\".format(e))", "def main():\n # Load and parse json object from file with specific\n file_name = \"./benchmark.log\"\n doc = re.sub(\"[\\n|\\t]\", \"\", \"\".join(benchmark.read_text_file(file_name)))\n json_object = json.loads(\"\".join(doc))\n\n intervals = json_object[\"intervals\"]\n\n socket_keys = benchmark.get_socket_keys(intervals)\n\n result = benchmark.get_result_dictionary(intervals, socket_keys)\n\n print_to_csv(result, socket_keys)", "def execute(self):\n\n if not os.path.exists(self._source_file_name):\n logger.info(\"Did not find the aicpu profiling source file\")\n return\n\n with open(self._source_file_name, 'rb') as ai_cpu_data:\n content = ai_cpu_data.read()\n if content[0:2].hex().upper() == \"5A5A\":\n ai_cpu_total_time_summary, result_list = self.parser_binary_file(content)\n else:\n ai_cpu_total_time_summary, result_list = self.parser_txt_file(content)\n\n os.chmod(self._source_file_name, stat.S_IREAD)\n\n if result_list:\n ai_cpu_total_time = format(ai_cpu_total_time_summary, '.6f')\n result_list.append([\"AI CPU Total Time(ms):\", ai_cpu_total_time])\n fwrite_format(self._output_filename, \" \".join(self._dst_file_column_title), is_start=True, is_print=True)\n fwrite_format(self._output_filename, result_list, is_print=True)\n\n # For timeline display.\n self._result_list = result_list", "def evaluate_benchmarks(self):\n\n # iterate over replicates\n results = {}\n for replicate_id, replicate in self.replicates:\n\n # evaluate benchmark for current replicate\n bmark = SimulationBenchmark(replicate.copy(),\n graph=self.graphs[replicate_id],\n **self.params)\n\n # store results\n results[replicate_id] = dict(\n\n labels_MAE=bmark.scores['labels'].MAE,\n level_only_MAE=bmark.scores['level_only'].MAE,\n spatial_only_MAE=bmark.scores['spatial_only'].MAE,\n community_MAE=bmark.scores['labels_comm'].MAE,\n\n labels_PCT=bmark.scores['labels'].percent_correct,\n level_only_PCT=bmark.scores['level_only'].percent_correct,\n spatial_only_PCT=bmark.scores['spatial_only'].percent_correct,\n community_PCT=bmark.scores['labels_comm'].percent_correct)\n\n # compile dataframe\n results = pd.DataFrame.from_dict(results, orient='index')\n results.index.set_names(self.multiindex, inplace=True)\n\n return results", "def run(self, test):\n result = _TestResult(self.verbosity)\n\n self.start_time = datetime.datetime.now()\n test(result)\n self.stop_time = datetime.datetime.now()\n self.elapsed_time = self.stop_time - self.start_time\n print >>sys.stderr, '\\nTime Elapsed: %s' % self.elapsed_time\n\n report = self.generate_report(result)\n self.stream.write(report.encode('utf8'))\n\n return result", "def benchmark(self, f, name, publish=True, **kwargs):\n (\n tags,\n optional_benchmark_info,\n context,\n info,\n github,\n options,\n cluster_info,\n _,\n ) = self._init(kwargs)\n self.set_python_info_and_context(info, context)\n\n timing_options = self._get_timing_options(options)\n iterations = timing_options.pop(\"iterations\")\n if 
iterations < 1:\n raise ValueError(f\"Invalid iterations: {iterations}\")\n\n try:\n data, output = self._get_timing(f, iterations, timing_options)\n # It's hard to read what this next function call really does. It\n # does _not_ publish, but I think it creates a specific data\n # structure. Should this be in the exception handler? Within\n # self._get_timing() above we run user-given code, so that is\n # expected to raise exceptions, and wants to be handled. But which\n # exceptions is self.record() expected to raise especially when\n # _not_ doing HTTP interaction? And why do we handle those\n # exceptions in the same way as those exceptions that are raised by\n # user-given code?\n benchmark, _ = self.record(\n {\"data\": data, \"unit\": \"s\"},\n name,\n tags=tags,\n optional_benchmark_info=optional_benchmark_info,\n context=context,\n info=info,\n github=github,\n options=options,\n cluster_info=cluster_info,\n publish=False,\n )\n except Exception as exc:\n error = {\"stack_trace\": traceback.format_exc()}\n benchmark, _ = self.record(\n None,\n name,\n tags=tags,\n optional_benchmark_info=optional_benchmark_info,\n context=context,\n info=info,\n github=github,\n options=options,\n cluster_info=cluster_info,\n error=error,\n publish=False,\n )\n raise exc\n finally:\n if publish:\n # It's a bit unclear -- is `benchmark` defined in _all_ cases\n # when we arrive here?\n # https://pylint.readthedocs.io/en/latest/user_guide/messages/error/used-before-assignment.html\n self.publish(benchmark) # pylint: disable=used-before-assignment\n return benchmark, output", "def emPerformanceTest(filesAndDirectories='None', resultsFileName='None', options='None'):\n\n pass", "def run(self, directories, params):\n\n ps = subprocess.Popen(\n \"ab -n 2000 -c 100 %s\" % (params[\"url\"]),\n shell=True,\n stdout=subprocess.PIPE,\n cwd=directories.builtins\n )\n\n out, err = ps.communicate()\n\n open(\n os.path.join(\n directories.artifacts,\n self.out\n ),\"w\"\n ).write(out)\n\n timetaken = re.findall('Time taken for tests:\\s+(\\d+\\.\\d+) seconds', out)[0]\n return {\n \"return_code\": ps.returncode,\n \"raw_output\": out,\n \"metrics\": [\n (\"load_test_seconds\", \"number\", timetaken)\n ]\n }", "def Run(benchmark_spec: bm_spec.BenchmarkSpec) -> List[sample.Sample]:\n discovery_duration = benchmark_spec.data_discovery_service.DiscoverData()\n return [\n sample.Sample('data_discovery_duration', discovery_duration, 'seconds',\n benchmark_spec.data_discovery_service.GetMetadata())]", "def performance_test():\n from timeit import Timer\n t = Timer(\"test()\", \"from __main__ import test\")\n print t.timeit(number=1)", "def execute_experiment_with_latency(self):\n protocol_name = self.protocol_config['protocol']\n number_of_repetitions = self.protocol_config['numOfRepetitions']\n configurations = self.protocol_config['configurations']\n working_directory = self.protocol_config['workingDirectory']\n executables = self.protocol_config['executableName']\n for i in range(number_of_repetitions):\n for idx2 in range(len(configurations)):\n for idx in range(len(executables)):\n os.system(f'fab -f Execution/fabfile.py run_protocol_with_latency:{self.protocol_config_path},'\n f'{configurations[idx2]},{executables[idx]},{working_directory[idx]} --parallel | '\n f' tee WebApp/ExecutionLogs/{protocol_name}.log')", "def run(self, **kwargs):\n try:\n name = kwargs[\"name\"]\n try:\n self.suite = unittest.TestLoader().loadTestsFromName(name)\n except ImportError:\n self.__logger.error(\"Can not import %s\", name)\n 
return testcase.TestCase.EX_RUN_ERROR\n except KeyError:\n pass\n try:\n assert self.suite\n self.start_time = time.time()\n stream = six.StringIO()\n result = unittest.TextTestRunner(\n stream=stream, verbosity=2).run(self.suite)\n self.__logger.debug(\"\\n\\n%s\", stream.getvalue())\n self.stop_time = time.time()\n self.details = {\n \"testsRun\": result.testsRun,\n \"failures\": len(result.failures),\n \"errors\": len(result.errors),\n \"stream\": stream.getvalue()}\n self.result = 100 * (\n (result.testsRun - (len(result.failures) +\n len(result.errors))) /\n result.testsRun)\n return testcase.TestCase.EX_OK\n except AssertionError:\n self.__logger.error(\"No suite is defined\")\n return testcase.TestCase.EX_RUN_ERROR\n except ZeroDivisionError:\n self.__logger.error(\"No test has been run\")\n return testcase.TestCase.EX_RUN_ERROR", "def run_all_iterations(self):\n self.start_time = time.time()\n for _ in xrange(self.iterations):\n self.run_iteration()\n self.elapsed_time = time.time() - self.start_time\n\n self.print_statistics()", "def run_performance():\n # Create a Struct data instance from config\n inputs = Struct(config)\n inputs.throttle = throttle\n # Get oxidizer properties at the given temperature\n n2o = n2o_properties(inputs.ox.T_tank)\n # Our integration variables are oxidizer mass and liquid oxidizer volume\n Mox = n2o.rho_l*(inputs.ox.liquid_V) + n2o.rho_g*(inputs.ox.tank_V-inputs.ox.liquid_V)\n if inputs.options.output_on:\n print(\"Initial oxidizer mass: {} kg.\".format(Mox))\n\n start = time.perf_counter() # Start timer for integration\n\n time, record = integration(inputs) # Time = time for integration, record = output data\n F_thrust = record.F_thrust\n p_cc = record.p_cc\n p_oxtank = record.p_oxtank\n p_oxpresstank = record.p_oxpresstank\n p_fueltank = record.p_fueltank\n p_fuelpresstank = record.p_fuelpresstank\n p_oxmanifold = record.p_oxmanifold\n T_oxtank = record.T_oxtank\n T_cc = record.T_cc\n area_core = record.area_core\n OF = record.OF_i\n gamma_ex = record.gamma_ex\n m_dot_ox = record.m_dot_ox\n m_dot_fuel = record.m_dot_fuel\n p_crit = record.p_crit\n m_dot_ox_crit = record.m_dot_ox_crit\n M_e = record.M_e\n p_exit = record.p_exit\n p_shock = record.p_shock\n\n time_elapsed = start-time.perf_counter() # Stop the timer and print elapsed time\n if inputs.options.output_on:\n print(\"Time elapsed for this timestep: {} sec.\".format(time_elapsed))", "def run(self, job_result):\n self.logger.info(f\"Running report\")\n job_result.status = JobResultStatusChoices.STATUS_RUNNING\n job_result.save()\n\n try:\n\n for method_name in self.test_methods:\n self.active_test = method_name\n test_method = getattr(self, self.test_methods[method_name])\n test_method()\n\n if self.failed:\n self.logger.warning(\"Report failed\")\n job_result.status = JobResultStatusChoices.STATUS_FAILED\n else:\n self.logger.info(\"Report completed successfully\")\n job_result.status = JobResultStatusChoices.STATUS_COMPLETED\n\n except Exception as e:\n stacktrace = traceback.format_exc()\n self.log_failure(None, f\"An exception occurred: {type(e).__name__}: {e} <pre>{stacktrace}</pre>\")\n logger.error(f\"Exception raised during report execution: {e}\")\n job_result.set_status(JobResultStatusChoices.STATUS_ERRORED)\n\n job_result.data = self._results\n job_result.completed = timezone.now()\n job_result.save()\n\n # Perform any post-run tasks\n self.post_run()", "def test_time(cmd, samples=16, warmup=4):\n # do testing\n print()\n avg_time = 0\n for s in range(samples + warmup):\n # report 
progress\n progress = s / (samples + warmup)\n print(CSI_UP + CSI_CLEARLN + \"Testing [{}%]\".format(floor(progress * 100)))\n\n output = shell(cmd) # run command\n tables = csv_mt.read_string(output, parse_float=True) # parse its output\n time = tables[\"statistics\"][\"time_us\"][0] # get its timing data\n\n # skip a few runs to let the system \"warm up\"\n if s >= warmup:\n avg_time += time / samples # compute average execution time\n\n # log the average time for this test case\n return avg_time", "def measure_test(self):\n return self.execute(Sgp40I2cCmdExecuteSelfTest())", "def benchmark(self, train_X, train_y):\n roc_ap = []\n test_X, test_y = self.format_input(self.M.befree_genes, self.neg_test_genes)\n roc_ap.append(self.test(test_X, test_y))\n test_X, test_y = self.format_input(self.M.sven_genes, self.neg_test_genes)\n roc_ap.append(self.test(test_X, test_y))\n roc_ap.extend(self.cross_validate(train_X, train_y))\n\n index = list(range(1, 11))\n index = ['befree', 'sven'] + index\n output = pd.DataFrame([index] + list(zip(*roc_ap)), index=['label', 'roc', 'ap'])\n output.to_csv(self.save_path + '/benchmarking output')", "def run(self):\n\t\tself.print_header_information()\n\n\t\t#self.get_number_of_instances_from_user()\n\n\t\t#self.compile_dataframe(self.number_of_instances)\n\n\t\tprint \"\\n{}\".format(self.data)\n\n\t\t# Uncomment these lines for debugging\n\t\tself.compile_dataframe_default()\n\t\t# print \"\\n{}\".format(self.data)\n\n\t\tself.analysis_of_dataframe(self.data)", "def report_performance(self):\n performance = self.amygdala.visualize(self.timestep, \n self.name, \n self.log_dir)\n print('Final performance is {0:.3}'.format(performance))\n self.backup()\n return performance", "def Run(benchmark_spec):\n _UpdateBenchmarkSpecWithFlags(benchmark_spec)\n vm = benchmark_spec.vms[0]\n\n if benchmark_spec.tpus:\n mnist_benchmark_script = 'mnist_tpu.py'\n mnist_benchmark_cmd = ('cd tpu/models && '\n 'export PYTHONPATH=$(pwd) && '\n 'cd official/mnist && '\n 'python {script} '\n '--data_dir={data_dir} '\n '--iterations={iterations} '\n '--model_dir={model_dir} '\n '--batch_size={batch_size}'.format(\n script=mnist_benchmark_script,\n data_dir=benchmark_spec.data_dir,\n iterations=benchmark_spec.iterations,\n model_dir=benchmark_spec.model_dir,\n batch_size=benchmark_spec.batch_size))\n else:\n mnist_benchmark_script = 'mnist.py'\n mnist_benchmark_cmd = ('cd models && '\n 'export PYTHONPATH=$(pwd) && '\n 'cd official/mnist && '\n 'python {script} '\n '--data_dir={data_dir} '\n '--model_dir={model_dir} '\n '--batch_size={batch_size} '.format(\n script=mnist_benchmark_script,\n data_dir=benchmark_spec.data_dir,\n model_dir=benchmark_spec.model_dir,\n batch_size=benchmark_spec.batch_size))\n\n if nvidia_driver.CheckNvidiaGpuExists(vm):\n mnist_benchmark_cmd = '{env} {cmd}'.format(\n env=tensorflow.GetEnvironmentVars(vm), cmd=mnist_benchmark_cmd)\n samples = []\n metadata = CreateMetadataDict(benchmark_spec)\n\n if benchmark_spec.train_steps > 0:\n if benchmark_spec.tpus:\n tpu = benchmark_spec.tpu_groups['train'].GetName()\n num_shards = '--num_shards={}'.format(\n benchmark_spec.tpu_groups['train'].GetNumShards())\n else:\n tpu = num_shards = ''\n\n if benchmark_spec.tpus:\n mnist_benchmark_train_cmd = (\n '{cmd} --tpu={tpu} --use_tpu={use_tpu} --train_steps={train_steps} '\n '{num_shards} --noenable_predict'.format(\n cmd=mnist_benchmark_cmd,\n tpu=tpu,\n use_tpu=bool(benchmark_spec.tpus),\n train_steps=benchmark_spec.train_steps,\n num_shards=num_shards))\n else:\n 
mnist_benchmark_train_cmd = (\n '{cmd} --train_epochs={train_epochs} '.format(\n cmd=mnist_benchmark_cmd,\n train_epochs=benchmark_spec.train_epochs))\n\n start = time.time()\n stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_train_cmd)\n elapsed_seconds = (time.time() - start)\n samples.extend(MakeSamplesFromTrainOutput(\n metadata, stdout + stderr, elapsed_seconds, benchmark_spec.train_steps))\n\n if benchmark_spec.eval_steps > 0:\n if benchmark_spec.tpus:\n mnist_benchmark_eval_cmd = (\n '{cmd} --tpu={tpu} --use_tpu={use_tpu} --eval_steps={eval_steps}'\n .format(\n cmd=mnist_benchmark_cmd,\n use_tpu=bool(benchmark_spec.tpus),\n tpu=benchmark_spec.tpu_groups['eval'].GetName(),\n eval_steps=benchmark_spec.eval_steps))\n else:\n mnist_benchmark_eval_cmd = ('{cmd} --eval_steps={eval_steps}'.format(\n cmd=mnist_benchmark_cmd, eval_steps=benchmark_spec.eval_steps))\n\n stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_eval_cmd)\n samples.extend(MakeSamplesFromEvalOutput(metadata, stdout + stderr,\n elapsed_seconds))\n return samples", "def main():\n run_test_summary1a()\n run_test_summary1c()\n run_test_summary1c()", "def run_test(self):\n self.output_analytics = self.run_inference()\n self.output_df = pd.DataFrame(self.output_analytics)", "def run_performance( pkg_mod_iter ):\n for package, module_iter in pkg_mod_iter:\n print( package )\n print( \"=\"*len(package ) )\n print()\n for filename, modname in module_iter:\n print( filename, modname )\n try:\n module= __import__( package+\".\"+modname, fromlist=(modname,\"performance\") )\n module.performance()\n except AttributeError:\n pass # no performance() function in the module.", "def run(self, verbose=0):\n self.verbose = verbose\n self._preproc()\n self._lda()\n self._evaluate()", "def run_tests(self):\n\n self.test_report = []\n\n #dict of unsorted lists\n dict_of_un_lists = self.dict_un_lists_intersection_test(self.data_dict)\n self.test_report.append(dict_of_un_lists)\n\n #dict of sets\n dict_of_sets = self.build_dict_of_sets(self.data_dict)\n self.test_report.append(self.dict_sets_intersection_test(dict_of_sets))\n\n #pandas - experimental and probably not the way to use pandas\n # dict_of_pandas = self.build_dict_of_panda_series(self.data_dict)\n # self.test_report.append(self.dicts_any_intersection_node_test(dict_of_pandas))\n\n # print results\n\n if self.verbose:\n self.print_tests_results()", "def run(self):\n\n def stop(result):\n if isinstance(result, Exception):\n self.error = result\n self.stop()\n #for e in events[0:100]:\n # print(e)\n dt = (self.stopped-self.started)*1000\n print(\"Took {}ms\".format(dt))\n #print(\"Events: {}\").format(len(events))\n if self.profile:\n stats = pstats.Stats(self.profiler)\n for sort in ['tottime','cumtime']:\n stats.sort_stats(sort)\n stats.print_stats(0.1)\n\n self.done.then(stop)\n self.deferred_call(self.show_view)\n self.start()\n if self.error:\n raise self.error", "def emPerformance(filesAndDirectories='None', resultsFileName='None', iterationCount='3', modes='None', testTypes='None', viewports='None', verbose='False'):\n\n pass", "def run_all_tests(self) -> None:\n self.run_trt_precision_tests()\n logging.info(\"Check analysis result at: %s\", self._output_dir)", "def run_test_suite(self, test_config):\n # Folder to store suite results\n test_config['test_suite_start_time'] = datetime.datetime.now().strftime(\n '%Y%m%dT%H%M%S')\n\n instance = cluster_local.UseLocalInstances()\n for i in range(test_config['repeat']):\n self.run_benchmark(test_config, instance, 
copy=i)\n\n suite_dir_name = '{}_{}'.format(test_config['test_suite_start_time'],\n test_config['test_id'])\n reporting.process_folder(\n os.path.join(self.workspace, 'results', suite_dir_name),\n report_config=self.auto_test_config)", "def faster(self):\n self.run_command('faster')", "def run(self):\n # get components list\n #component_id_list = self.getComponentsList()\n asset_id = 3776\n component_id_list = self.get_component_info_for_one_asset(asset_id)\n # call computeResults method\n results = self.compute_results(component_id_list)\n # write to the output file\n self.write_to_file(results)", "def run(self):\n self.evaluate()\n self.accumulate()\n self.summarize()", "def output_benchmark_results(output_dir, ts_agent_list=None, ga_agent=None, title=None, auto_open=True):\n if (ts_agent_list is None or not all(ts_agent.benchmark for ts_agent in ts_agent_list)) \\\n and (ga_agent is None or not ga_agent.benchmark):\n raise UserWarning(\"agent arguments were None or were not ran in benchmark mode.\")\n\n if title is None:\n title = \"Benchmark Run {}\".format(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\"))\n\n output_dir = Path(output_dir)\n\n if not output_dir.exists():\n output_dir.mkdir(parents=True)\n\n def compute_stats(lst):\n return {\n 'min': round(min(lst)),\n 'median': round(statistics.median(lst)),\n 'max': round(max(lst)),\n 'std': round(statistics.stdev(lst)) if len(lst) > 1 else 0,\n 'var': round(statistics.variance(lst)) if len(lst) > 1 else 0,\n 'mean': round(statistics.mean(lst))\n }\n\n # tabu search results\n if ts_agent_list is not None and all(ts_agent.benchmark for ts_agent in ts_agent_list):\n _create_ts_plots(ts_agent_list, output_dir)\n ts_result_makespans = []\n ts_initial_makespans = []\n ts_iterations = []\n for ts_agent in ts_agent_list:\n ts_result_makespans.append(ts_agent.best_solution.makespan)\n ts_initial_makespans.append(ts_agent.initial_solution.makespan)\n ts_iterations.append(ts_agent.benchmark_iterations)\n\n ts_result_makespans_stats = compute_stats(ts_result_makespans)\n ts_initial_makespans_stats = compute_stats(ts_initial_makespans)\n ts_iterations_stats = compute_stats(ts_iterations)\n\n else:\n ts_result_makespans_stats = None\n ts_initial_makespans_stats = None\n ts_iterations_stats = None\n\n # genetic algorithm results\n if ga_agent is not None and ga_agent.benchmark:\n _create_ga_plots(ga_agent, output_dir)\n ga_initial_makespans = [sol.makespan for sol in ga_agent.initial_population]\n ga_result_makespans = [sol.makespan for sol in ga_agent.result_population]\n\n ga_initial_makespans_stats = compute_stats(ga_initial_makespans)\n ga_result_makespans_stats = compute_stats(ga_result_makespans)\n\n else:\n ga_initial_makespans_stats = None\n ga_result_makespans_stats = None\n\n # render template\n template = template_env.get_template(benchmark_template)\n rendered_template = template.render(\n title=title,\n ts_agent_list=ts_agent_list,\n ts_initial_makespans_stats=ts_initial_makespans_stats,\n ts_result_makespans_stats=ts_result_makespans_stats,\n iterations_per_ts_agent_stats=ts_iterations_stats,\n output_directory=output_dir.resolve(),\n ga_agent=ga_agent,\n ga_initial_makespans_stats=ga_initial_makespans_stats,\n ga_result_makespans_stats=ga_result_makespans_stats,\n )\n\n # create index.html\n with open(output_dir / 'index.html', 'w') as output_file:\n output_file.write(rendered_template)\n\n if auto_open:\n webbrowser.open(f'file://{output_dir.resolve()}/index.html')", "def main(layers=None, modules=None):\n test_runs = 
create_test_runs(layers=layers, modules=modules)\n discovered_layers = set(param.get(\"layer\", \"Unknown layer\") for param in test_runs)\n for layer in discovered_layers:\n logger.debug(\"Discovered: %s\", layer)\n logger.debug(\"Discovered %d layers in total.\", len(discovered_layers))\n\n discovered_modules = set(\n param.get(\"module\", \"Unknown module\") for param in test_runs\n )\n for module in discovered_modules:\n logger.debug(\"Discovered: %s\", module)\n logger.debug(\"Discovered %d modules in total.\", len(discovered_modules))\n\n logger.debug(\"Running %d test runs.\", len(test_runs))\n\n # Counter system congestion and hyperthreading, FWIW\n concurrency = max(1, cpu_count() // 2 - 1)\n logger.debug(\"Timing tests in up to %d processes in parallel.\", concurrency)\n pool = Pool(concurrency)\n\n logger.debug(\"Timing layers - this can take a while!\")\n start_time = time()\n results = sorted(\n pool.imap_unordered(run_tests, test_runs),\n key=lambda result: result.get(\"runtime\", 0.0),\n )\n\n pool.terminate()\n pool.join()\n\n wallclock = humanize_time(time() - start_time)\n logger.debug(\"Done timing layers in %s.\", wallclock)\n\n total_runtime = sum(result.get(\"runtime\", 0.0) for result in results)\n total_count = sum(result.get(\"count\", 0) for result in results)\n\n classname_width = max(len(result[\"classname\"]) for result in results)\n count_width = max(len(str(result.get(\"count\", 0))) + 4 for result in results)\n speed_width = max(\n len(\"{:.3f}\".format(result.get(\"speed\", 0))) + 4 for result in results\n )\n runtime_width = max(\n len(humanize_time(result.get(\"runtime\", 0.0))) + 4 for result in results\n )\n\n header = (\n \"{classname:>{classname_width}}\"\n \"{count:>{count_width}}\"\n \"{speed:>{speed_width}}\"\n \"{runtime:>{runtime_width}}\"\n \"{runtime_percentage:>10}\" # 9.2f\n \"{count_percentage:>10}\" # 9.2f\n \"{relative_weight:>11}\".format( # 10.2f\n classname=\"classname\",\n count=\"cnt\",\n speed=\"spd\",\n runtime=\"rt\",\n runtime_percentage=\"rt%\",\n count_percentage=\"cnt%\",\n relative_weight=\"wt%\",\n classname_width=classname_width,\n count_width=count_width + 6, # Suffix \" tests\"\n speed_width=speed_width + 9, # Suffix \" s / test\"\n runtime_width=runtime_width,\n )\n )\n logger.info(header)\n header_width = len(header)\n logger.info(\"=\" * header_width)\n\n for result in results:\n classname = result[\"classname\"]\n count = result.get(\"count\", 0)\n runtime = result.get(\"runtime\", 0.0)\n speed = result.get(\"speed\", 0.0)\n runtime = result.get(\"runtime\", 0)\n\n runtime_percentage = runtime / total_runtime\n count_percentage = float(count) / float(total_count)\n try:\n relative_weight = runtime_percentage / count_percentage\n except ZeroDivisionError:\n # Something failed and count thus is 0\n relative_weight = 0.0\n\n runtime = humanize_time(runtime)\n line = (\n \"{classname:>{classname_width}}\"\n \"{count:>{count_width}} tests\"\n \"{speed:>{speed_width}.3f} s / test\"\n \"{runtime:>{runtime_width}}\"\n \"{runtime_percentage:9.2f}%\"\n \"{count_percentage:>9.2f}%\"\n \"{relative_weight:>10.2f}%\".format(\n classname=classname,\n count=count,\n speed=speed,\n runtime=runtime,\n runtime_percentage=runtime_percentage * 100,\n count_percentage=count_percentage * 100,\n relative_weight=relative_weight * 100,\n classname_width=classname_width,\n count_width=count_width,\n speed_width=speed_width,\n runtime_width=runtime_width,\n )\n )\n logger.info(line)\n\n total = humanize_time(total_runtime)\n total_runtime_width = 
len(total)\n wallclock_width = len(wallclock)\n totals_width = max(wallclock_width, total_runtime_width)\n\n total_line = \"Total: {:>{totals_width}}\".format(\n total, totals_width=totals_width\n )\n wallclock_line = \"Wallclock: {:>{totals_width}}\".format(\n wallclock, totals_width=totals_width\n )\n logger.info(\"-\" * header_width)\n logger.info(\"Sorted by runtime.\")\n logger.info(\"\")\n logger.info(total_line)\n logger.info(wallclock_line)\n\n failed_runs = [result for result in results if result.get(\"failed\")]\n if failed_runs:\n logger.warn(\"Test run failures detected - YMMV!\")\n for run in failed_runs:\n logger.warn(\"Failures in: %s\", run.get(\"classname\", \"Unknown test class\"))", "def run_test_suite(test_suite_description: Dict[str, List[str]],\n test_executable: str, perf_counters: List[str],\n num_threads: Optional[int]):\n\n if num_threads is None:\n num_threads = 1\n\n test_descriptions = []\n for test in test_suite_description['tests']:\n test_descriptions.append((test_executable, test, perf_counters))\n\n test_data_output = Parallel(n_jobs=num_threads)(\n delayed(run_and_parse)(test_description)\n for test_description in test_descriptions)\n\n formatted_test_data = []\n for test_instance in test_data_output:\n if test_instance:\n test_info = {'name': test_instance[0], 'iterations': 1}\n test_info.update(test_instance[1])\n formatted_test_data.append(test_info)\n\n return formatted_test_data", "def run(bench, budget):\n\n # Get the set of hypeparameter configuration space possible in this benchmark\n cs = bench.get_configuration_space()\n\n ##############################################################################\n # Begin implementation\n ##############################################################################\n bomo = BOMO(cs)\n\n for _ in range(budget):\n i = bomo.sample()\n sample = bench.objective_function(cs[i])\n print(\"Sample:\", sample)\n bomo.fit_predict(sample)\n\n ##############################################################################\n # End implementation\n ##############################################################################\n # This needs to be called at the end of a run\n bench.done()", "def experiment(duration_s, cmd, frequency):\n systrace_output = System.systrace_start(\n te, os.path.join(te.res_dir, 'trace.html'), conf=conf)\n systrace_output.expect(\"Starting tracing\")\n\n if frequency:\n run_page_stats(duration_s, frequency)\n elif cmd:\n target.execute(cmd)\n else:\n sleep(duration_s)\n\n systrace_output.sendline(\"\")\n System.systrace_wait(te, systrace_output)\n te.platform_dump(te.res_dir)", "def execute_benchmark(path, budget=None, threads=None):\n benchmark_path = runtime.binary_path(path)\n cmd = [benchmark_path, \"--benchmark_format=json\"]\n if budget is not None:\n cmd += [\"-b\", str(budget)]\n if threads is not None:\n cmd += [\"-t\", str(threads)]\n res = process.execute(cmd)\n return json.loads(res.std_out)", "def run_benchmark(take_geo_mean, num_runs, bench_func, *args):\n #if options.profile:\n # import cProfile\n # prof = cProfile.Profile()\n # prof.runcall(bench_func, num_runs, *args)\n # prof.print_stats(sort=options.profile_sort)\n #else:\n data = bench_func(num_runs, *args)\n if take_geo_mean:\n product=1\n _total=0\n for _x in data:\n _total+=_x\n product *= _x\n _geo_mean=math.pow(product, 1.0 / len(data))\n return \"Runs: %d, Total Time:%5.3f, Geo Mean:%6.4f\" % (len(data), _total, _geo_mean)\n else:\n for x in data:\n print(x)", "def main():\n install_dependencies()\n if not 
check_requirements():\n return\n prepare_db()\n all_browsers_count = len(cfg.get(\"browserIDs\"))\n for index, browserID in enumerate(cfg.get(\"browserIDs\")):\n all_versions = cfg.get(\"browsers\")[browserID].get(\"versions\")\n collect_warnings(all_versions, browserID)\n progress = set_progress_percentage(index, all_browsers_count)\n print_progress(progress)\n get_sum_from_db()", "def run(self):\n if not self.__class__.profile is None:\n import cProfile\n cminstance = self\n cProfile.runctx('self._run()', globals(), locals(), self.__class__.profile)\n else:\n self._run()", "def run(self):\n self.run_measurement()\n self.run_analysis()\n if self.get_param_value('update'):\n self.run_update()\n self.dev.update_cancellation_params()\n\n if self.get_param_value('configure_mux_drive'):\n drive_lo_freqs = self.get_param_value('drive_lo_freqs')\n configure_qubit_mux_drive(self.qubits, drive_lo_freqs)", "def iperfDriver(self):\n\n # A job denotes a traffic flow, which corresponds to an iperf task.\n jobs = self.config.trace.jobs\n if jobs:\n now = time()\n info(\"**** [G2]: iperf test started at:\", now, \"\\n\") # Prints Unix epoch of 'now'.\n\n procs = []\n for j in jobs:\n p = Process(target=self.launchIperf, args=(j,))\n procs.append(p)\n p.start()\n\n for p in procs:\n p.join()\n\n end = time()\n simTime = end-now\n info(\"**** [G2]: iperf test done successfully in %.2f\" %simTime, \"sec\\n\")\n with open(os.path.join(self.config.benchPath, \"%s_experiment_duration.csv\" %self.config.prefix), \"w\") as fd:\n fd.write(\"simulation duration, slowest flow duration\\n\")\n fd.write(\"%.2f,\" %simTime)\n else:\n info(\"**** [G2]: no flow found, iperf test unsuccessful \\n\")", "def post_result(self, halt_on_failure=False):\n self.api.m.perf_dashboard.set_default_config()\n self.api.m.perf_dashboard.post_bisect_results(\n self.get_result(), halt_on_failure)", "def run(self):\n set_thread_name_with_suffix(self.sandbox_handler.conf.name)\n self._is_stop_set()\n sandbox_report = self._run_sandbox_tests()\n\n with ft.ThreadPoolExecutor(thread_name_prefix=\"[Resource tests]\") as executor:\n futures = {\n executor.submit(self._execute_resource_tests, rh, sandbox_report)\n for rh in self.resource_handlers\n }\n ft.wait(futures)\n for future in futures:\n future.result()\n\n with self.REPORT_LOCK:\n self.reporting.sandboxes_reports.append(sandbox_report)" ]
[ "0.7920896", "0.68559283", "0.67580193", "0.6716297", "0.66414857", "0.65965855", "0.6546384", "0.65423447", "0.65336007", "0.6459301", "0.6339556", "0.6312379", "0.62926507", "0.6286289", "0.623686", "0.622832", "0.61696357", "0.61670846", "0.6156982", "0.6118324", "0.6117855", "0.61050993", "0.61001164", "0.6088979", "0.60484916", "0.6017004", "0.6016694", "0.6007208", "0.59964246", "0.5988883", "0.59841937", "0.5979538", "0.59539765", "0.5946811", "0.59440476", "0.59365594", "0.59280354", "0.5920262", "0.59180325", "0.59050417", "0.5900018", "0.5895225", "0.58880246", "0.587992", "0.58737254", "0.58638823", "0.58325547", "0.5808391", "0.5795943", "0.5794848", "0.5785413", "0.5784008", "0.5782332", "0.5753514", "0.57488036", "0.573773", "0.57333434", "0.57167786", "0.5712766", "0.57098526", "0.5704982", "0.5699858", "0.56939286", "0.5692395", "0.5688317", "0.56781095", "0.56763077", "0.56434417", "0.5643259", "0.5642766", "0.56419736", "0.56349313", "0.5634139", "0.5629408", "0.5622419", "0.5621716", "0.5621329", "0.561208", "0.56116277", "0.56098646", "0.56039214", "0.55995613", "0.55960685", "0.559349", "0.5589559", "0.5589428", "0.55814487", "0.5574793", "0.5566657", "0.5561004", "0.55581856", "0.5558058", "0.55576265", "0.5555388", "0.55539274", "0.55426794", "0.5532805", "0.55302304", "0.5528075", "0.5511962" ]
0.7640157
1
Make an ArgumentParser instance for benchmark options.
def make_argument_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument('--device', type=str, choices=['CPU', 'GPU'],
                        help='Execution device.', required=True)
    parser.add_argument('-N', type=int, default=DEFAULT_N,
                        help='Number of particles.')
    parser.add_argument('--rho', type=float, default=DEFAULT_RHO,
                        help='Number density.')
    parser.add_argument('--dimensions', type=int, choices=[2, 3],
                        help='Number of dimensions.', default=DEFAULT_DIMENSIONS)
    parser.add_argument('--warmup_steps', type=int, default=DEFAULT_WARMUP_STEPS,
                        help='Number of timesteps to run before timing.')
    parser.add_argument('--benchmark_steps', type=int, default=DEFAULT_BENCHMARK_STEPS,
                        help='Number of timesteps to run in the benchmark.')
    parser.add_argument('--repeat', type=int, default=DEFAULT_REPEAT,
                        help='Number of times to repeat the run.')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Verbose output.')
    return parser
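A minimal usage sketch for the parser above, assuming it lives in a benchmark module that imports argparse and defines the DEFAULT_* constants; the constant values below are hypothetical stand-ins, not taken from the source:

import argparse

# Hypothetical stand-ins for the module-level defaults the parser references.
DEFAULT_N = 64000
DEFAULT_RHO = 1.0
DEFAULT_DIMENSIONS = 3
DEFAULT_WARMUP_STEPS = 1000
DEFAULT_BENCHMARK_STEPS = 5000
DEFAULT_REPEAT = 1

if __name__ == '__main__':
    # Parse an example command line; drop the explicit list to read sys.argv instead.
    parser = make_argument_parser()
    args = parser.parse_args(['--device', 'GPU', '-N', '128000', '--verbose'])
    print(args.device, args.N, args.warmup_steps, args.verbose)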
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_argument_parser():\n parser = Benchmark.make_argument_parser()\n parser.add_argument('--skip-reference',\n action='store_true',\n help='Skip the reference simulation run.')\n return parser", "def arg_parser(cls):\n parser = argparse.ArgumentParser(\n description='{} options'.format(cls.__name__),\n usage=('dotest.py --results-formatter-options='\n '\"--option1 value1 [--option2 value2 [...]]\"'))\n parser.add_argument(\n \"--dump-results\",\n action=\"store_true\",\n help=('dump the raw results data after printing '\n 'the summary output.'))\n return parser", "def make_argument_parser():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"data_directory\",\r\n help=\"Directory where the data files live.\")\r\n parser.add_argument(\"out\", help=\"Output directory of files.\")\r\n parser.add_argument(\"-t\", \"--test\", action=\"store_true\",\r\n help=(\"Test mode, avoids slow classifiers and uses\"\r\n \" 3 folds\"))\r\n parser.add_argument(\"--folds\", default=10,\r\n help=\"Number of folds for n-fold cross validation\")\r\n parser.add_argument(\"--data_pattern\", default=\"*.mat\",\r\n help=\"Pattern for data files\")\r\n parser.add_argument(\"--label_pattern\", default=\"*.mat\",\r\n help=\"Pattern for label files\")\r\n return parser", "def build_arg_parser():\n\n main = ArgumentParser(description='AMFinder command-line arguments.',\n allow_abbrev=False,\n formatter_class=RawTextHelpFormatter)\n\n subparsers = main.add_subparsers(dest='run_mode', required=True,\n help='action to be performed.')\n\n _ = training_subparser(subparsers)\n _ = prediction_subparser(subparsers)\n _ = diagnostic_subparser(subparsers)\n\n return main", "def set_options():\n parser = argparse.ArgumentParser(description='test hexrd.quadrature')\n\n return parser", "def cmd_line_parser():\n usage = \"usage: %prog [options]\\n\"\n opt_parser = OptionParser(usage=usage)\n opt_parser.add_option(\"--ai\", action=\"store\", dest=\"alternative_input\",\n help=\"an alternative input file (works only with load_from_pickle)\")\n opt_parser.add_option(\"--dl\", action=\"store\", dest=\"dumped_lexicon\",\n help=\"a dumped lexicon file (works only with load_from_pickle\")\n opt_parser.add_option(\"--dotest\", action=\"store_true\", dest=\"dotest\", default=False,\n help=\"use this flag if you want to apply testing\")\n opt_parser.add_option(\"-t\", action=\"store\", dest=\"test_parses\",\n help=\"the output file for the test parses\")\n opt_parser.add_option(\"-n\", action=\"store\", dest=\"train_parses\",\n help=\"the output file for the train parses\")\n opt_parser.add_option(\"-i\", dest=\"inp_file\", default=\"trainFiles/trainPairs\",\n help=\"the input file names (with the annotated corpus)\")\n opt_parser.add_option(\"--devel\", dest=\"development_mode\", default=False, action=\"store_true\",\n help=\"development mode\")\n\n return opt_parser", "def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('url', help='url to scrape')\n return parser", "def MakeOpts():\n parser = ArgumentParser()\n\n parser.add_argument(\"-o\", \"--host\", dest=\"host\", default=\"hldbv02\",\n help=\"The hostname for the MySQL database\")\n parser.add_argument('-d', '--debug', action='store_true', default=False,\n help='debug mode, store results in dummy DB')\n \n xml_group = parser.add_mutually_exclusive_group(required=True)\n xml_group.add_argument(\"-x\", \"--xml_filename\", default=None,\n help=\"The filename for a single XML result file\")\n xml_group.add_argument(\"-a\", \"--xml_dir\", 
default=None,\n help=\"The directory from which to import the latest XML results file\")\n \n parser.add_argument(\"-p\", \"--plate\", default=None, type=int, required=True,\n help=\"The plate number (usually between 1-10) in the robot script\")\n parser.add_argument('exp_id_csv', nargs=1,\n help='the name of the CVS file where the exp_ids are')\n\n return parser", "def make_parser():\n p = argparse.ArgumentParser(\n description=\"Visualize and analyze error from oblique/straight tag observations\"\n )\n\n p.add_argument(\"-n\", help=\"name of the test in the config file\")\n\n p.add_argument(\"-t\", help=\"throw out bad tags\", action=\"store_true\")\n\n p.add_argument(\"-v\", help=\"visualize data\", action=\"store_true\")\n\n p.add_argument(\"-i\", help=\"print result data\", action=\"store_true\")\n\n return p", "def _create_parser(self):\n default_options = self._create_defaults()\n\n all_categories = ['build', 'whitespace']\n\n mock_stderr = self._MockStdErr()\n\n return ArgumentParser(\n all_categories=all_categories,\n base_filter_rules=[],\n default_options=default_options,\n mock_stderr=mock_stderr,\n usage='test usage')", "def get_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', type=str)\n parser.add_argument('--method', type=str)\n parser.add_argument('--size_part', type=float, default=None)\n parser.add_argument('--start', type=int, default=0)\n parser.add_argument('--count', type=int, default=None)\n return parser", "def setup_options_parser(self, argparser):\n pass", "def command_line_argument_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(\n description=description(),\n epilog=epilog(),\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n return parser", "def command_line_argument_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(\n description=description(),\n epilog=epilog(),\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n return parser", "def build_argument_parser():\n description=\"A simple tool to batch rename given files.\"\n parser = ArgumentParser(description=description)\n parser.add_argument(\"-i\", \"--input-list\", required=False,\n help=\"the path to the input list file.\")\n parser.add_argument(\"-p\", \"--glob-pattern\", default=DEFAULT_GLOB_PATTERN,\n help=\"a glob pattern to filter input files.\")\n return parser", "def create_basic_parse():\n # SEE: https://docs.python.org/3/library/argparse.html\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--path_cover', type=str, required=True,\n help='path to the csv cover file')\n parser.add_argument('-d', '--path_dataset', type=str, required=False,\n help='path to the dataset location, '\n 'if missing in cover', default=None)\n parser.add_argument('-o', '--path_out', type=str, required=True,\n help='path to the output directory')\n parser.add_argument('--unique', dest='unique', action='store_true',\n help='whether each experiment have unique time stamp')\n parser.add_argument('--visual', dest='visual', action='store_true',\n help='whether visualise partial results')\n parser.add_argument('--lock_expt', dest='lock_thread', action='store_true',\n help='whether lock to run experiment in single thread')\n parser.add_argument('--run_comp_benchmark', action='store_true',\n help='run computation benchmark on the end')\n parser.add_argument('--nb_workers', type=int, required=False, default=1,\n help='number of registration running in parallel')\n return parser", "def parser(cls, *, with_showtb=False):\n 
parser = argparse.ArgumentParser(add_help=False)\n parser.add_argument('-v', '--verbose', action='count', default=0,\n help='produce more output')\n parser.add_argument('-q', '--quiet', action='count', default=0,\n help='produce less output')\n parser.add_argument('--dry-run', dest='dryrun', action='store_true',\n default=False, help='do not actually make changes')\n\n if with_showtb:\n parser.add_argument('--traceback', action='store_true',\n default=False, help='do not hide tracebacks')\n\n return parser", "def make_cli_parser(self):\n super(ContextualArgParser, self).make_cli_parser()\n self.cli_parser.add_option('--num-permutations', type='int',\n default=cbpn.NUM_PERMUTATIONS,\n help=(\"number of permutations for statistics \"\n \"[default: %default]\")\n )\n self.cli_parser.add_option('-s', '--edge-swaps', type='int',\n help=(\"Perform the given number of edge swaps to \"\n \"produce random graphs. [NOTE: using this option \"\n \"changes the algorithm for determining \"\n \"significance of a link between each given pair \"\n \"of terms.]\"\n )\n )\n self.cli_parser.add_option('--no-estimation', dest='estimate',\n action='store_false', default=True,\n help=(\"Do not use p-value estimation, but run the \"\n \"full number of permutations for every pair of \"\n \"annotation terms. [NOTE: this can substantially \"\n \"increase running time.]\"\n )\n )\n self.cli_parser.add_option('--score-correction',\n action='store_true', default=False,\n help=(\"Correct scores for each pair of terms by an \"\n \"\\\"expected\\\" value calculated from the mean \"\n \"expression value.\"\n )\n )", "def get_parser():\n\n parser = argparse.ArgumentParser(description=textwrap.dedent(\"\"\"\n Downloads and tests the md5 and file size of a given version of Anaconda located in\n http://repo.continuum.io/archive/\n\n The version option (-v) allows you to select a specific version of Anaconda to download and test.\n This will include every system's Anaconda distribution for that version (OSX, Windows, Linux)\n\n The --log option will write the results of these tests to a log file. If not enabled, results\n will be written to stdout.\n\n If you already have Anaconda installers inside the pkgs directory and wish to test those without\n downloading new ones, use the --no-download option. 
NOTE: You will still need to provide the\n version (-v) of the installers.\n \"\"\"), formatter_class=argparse.RawTextHelpFormatter)\n\n parser.add_argument('--log', action='store_true', dest='log', default=False,\n help=\"save a log of any errors discovered\")\n parser.add_argument('-v', '--version', action='store', default=False,\n help=\"version of Anaconda to download and test\")\n parser.add_argument('--no-download', action='store_true', dest='nodl', default=False,\n help=\"test local anaconda packages in pkgs, rather than download new ones\")\n\n return parser", "def argument_parser():\n parser = argparse.ArgumentParser(\n description='description',\n formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument('-n','--numcolors', type=int, help=\"Number of colors\", required=True)\n return parser", "def arg_parse():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-m\", \"--mix\", required=False, help=\"cube shuffle\")\n parser.add_argument(\"-e\", \"--explain\", action=\"store_true\", help=\"Get more explanation about steps\")\n options = parser.parse_args()\n return options", "def get_args():\n parser = argparse.ArgumentParser(description=\"Arguments for data exploration\")\n parser.add_argument(\"--tokenize\",\n dest=\"tokenize\",\n action=\"store_true\",\n help=\"Tokenize by words and sentences, counting averages/sd for each.\")\n return parser", "def initCmdLineParser():\n\n # Init parser and all general flags\n logging.debug(\"initiating command line option parser\")\n usage = \"usage: %prog [options]\"\n parser = OptionParser(usage)\n parser.add_option(\"--gen-answer-file\", help=\"Generate a template of an answer file, using this option excludes all other option\")\n parser.add_option(\"--answer-file\", help=\"Runs the configuration in none-interactive mode, extracting all information from the \\\n configuration file. using this option excludes all other option\")\n parser.add_option(\"--no-mem-check\", help=\"Disable minimum memory check\", action=\"store_true\", default=False)\n\n # For each group, create a group option\n for group in controller.getAllGroups():\n groupParser = OptionGroup(parser, group.getKey(\"DESCRIPTION\"))\n\n for param in group.getAllParams():\n cmdOption = param.getKey(\"CMD_OPTION\")\n paramUsage = param.getKey(\"USAGE\")\n optionsList = param.getKey(\"OPTION_LIST\")\n useDefault = param.getKey(\"USE_DEFAULT\")\n if not useDefault:\n if optionsList:\n groupParser.add_option(\"--%s\" % cmdOption, metavar=optionsList, help=paramUsage, choices=optionsList)\n else:\n groupParser.add_option(\"--%s\" % cmdOption, help=paramUsage)\n\n # Add group parser to main parser\n parser.add_option_group(groupParser)\n\n return parser", "def build_parser(usage, **kwargs):\n return BetterArgumentParser(usage=usage, version=VERSION, **kwargs)", "def create_arg_parser():\n server_modes = ['builtin', 'waitress']\n\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('h', metavar='HOST', help='Server HOST (e.g. \"localhost\")', type=str)\n parser.add_argument('p', metavar='PORT', help='Server PORT (e.g. 
\"5001\")', type=int)\n parser.add_argument('m', metavar='SERVER_MODE', help=\", \".join(server_modes), choices=server_modes, type=str)\n parser.add_argument('--debug', help=\"Run builtin server in debug mode\", action='store_true', default=False)\n\n return parser", "def argParser():\n parser = ArgumentParser(description=('Downloads problems from Project Euler'\n ' and saves copies locally.'))\n parser.add_argument('-s', '--start', type=int, default=1,\n help='The problem number to start the downloads at, default 1.')\n parser.add_argument('-e', '--end', type=int, default=None,\n help='The problem number to end the downloads at, default None.')\n return parser", "def _createOptionParser():\n usage = \\\n\"\"\"%prog [options] outputFile\n\nMines a large number of concise wine reviews from an online web site, and dumps\nthem to the given filename.\"\"\"\n\n parser = optparse.OptionParser(usage)\n\n parser.add_option('--debug', action='store_true', dest='debug',\n default=False, help='Enables debugging mode [False]')\n\n return parser", "def create_parser():\n parser = OptionParser()\n\n parser.add_option(\"-s\", \"--script\", dest=\"script\", default='pbs.sh', help=\"Output location\")\n parser.add_option(\"-p\", \"--period\", dest=\"period\", default=\"30\", help=\"qstat period\")\n\n parser.set_usage(\"\"\"%prog [options]\"\"\")\n return parser", "def create_options():\n optparser = optparse.OptionParser()\n optparser.add_option(\"-f\", \"--filename\", type=\"string\",\n help=\"execute a single unit test file\")\n optparser.add_option(\"-s\", \"--subprocess\", action=\"store_true\",\n default=False,\n help=\"run everything in an own subprocess \"\n \"(default: use a single process)\")\n optparser.add_option(\"-t\", \"--timeout\", type=\"int\", default=70,\n help=\"Timout for subprocesses before being killed \"\n \"(default: 70s per file)\")\n optparser.add_option(\"-v\", \"--verbose\", action=\"store_true\", default=False,\n help=\"be verbose and print anything instantly\")\n optparser.add_option(\"-r\", \"--random\", action=\"store_true\", default=False,\n help=\"randomize the order of tests\")\n optparser.add_option(\"-S\", \"--seed\", type=\"int\",\n help=\"seed the randomizer(useful to \"\n \"recreate earlier randomized test cases)\")\n optparser.add_option(\"-i\", \"--interactive\", action=\"callback\",\n callback=include_tag,\n callback_args=(\"interactive\",),\n help=\"also execute interactive tests\")\n optparser.add_option(\"-e\", \"--exclude\", action=\"callback\",\n callback=exclude_tag, type=\"string\",\n help=\"exclude test containing the tag\")\n optparser.add_option(\"-l\", \"--listtags\", action=\"callback\",\n callback=list_tags,\n help=\"lists all available tags and exits\")\n optparser.add_option(\"--logfile\", type=\"string\",\n help=\"save output to log file\")\n optkeys = [\"filename\",\n \"subprocess\",\n \"timeout\",\n \"random\",\n \"seed\",\n \"verbose\"\n ]\n return optparser, optkeys", "def getArgumentParser():\n parser = argparse.ArgumentParser(description=\"Script for running optimization for the ZH dark photon SR\")\n parser.add_argument('-i',\n '--infile',\n dest='infile',\n help='Input CSV file',\n default = '/afs/cern.ch/work/s/ssevova/public/dark-photon-atlas/zhdarkphotonml/samples/v09/mc16d_v09_samples.csv')\n parser.add_argument('-o',\n '--output',\n dest='outdir',\n help='Output directory for plots, selection lists, etc',\n default='outdir')\n parser.add_argument('--plotInputs',action='store_true', help='Plot scaled train & test inputs')\n 
parser.add_argument('--plotOutputs',action='store_true', help='Plot scaled test outputs for given probability range')\n parser.add_argument('--lower',help='Lower limit for conditional filtering')\n parser.add_argument('--upper',help='Upper limit for conditional filtering')\n\n return parser", "def parser(cls, *args, **kwargs):\n\n parser = ArgumentParser(*args, **kwargs)\n parser.add_argument('-a', \"--address\",\n help=\"Force entry point address\", default=None)\n parser.add_argument('-b', \"--dumpblocs\", action=\"store_true\",\n help=\"Log disasm blocks\")\n parser.add_argument('-z', \"--singlestep\", action=\"store_true\",\n help=\"Log single step\")\n parser.add_argument('-d', \"--debugging\", action=\"store_true\",\n help=\"Debug shell\")\n parser.add_argument('-g', \"--gdbserver\", type=int,\n help=\"Listen on port @port\")\n parser.add_argument(\"-j\", \"--jitter\",\n help=\"Jitter engine. Possible values are: gcc (default), tcc, llvm, python\",\n default=\"gcc\")\n parser.add_argument(\n '-q', \"--quiet-function-calls\", action=\"store_true\",\n help=\"Don't log function calls\")\n parser.add_argument('-i', \"--dependencies\", action=\"store_true\",\n help=\"Load PE and its dependencies\")\n\n for base_cls in cls._classes_():\n base_cls.update_parser(parser)\n return parser", "def make_parser():\n\n parser = ArgumentParser(description=\"Create dummy sensor stream esque data\")\n parser.add_argument('--tuples-per-emit', '-t', type=int, default=1,\n help='number of tuples to emit at once')\n parser.add_argument('--sensors', '-s', type=int, default=1,\n help='number of sensors to generate')\n\n return parser", "def create_cli_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('json_file', action='store',\n type=str, help=('Outlier per threshold file. This file '\n 'should have been generated by the '\n 'calculate_outliers_by_threshold '\n 'script.'))\n return parser", "def _create_parser():\n parser = ArgumentParser(description=\"A CLI that sends messages to an Azure event hub.\")\n\n parser.add_argument(\"--connection-string\", type=str, required=True,\n help=\"The Azure event hub connection string\")\n\n parser.add_argument(\"--name\", type=str, required=True,\n help=\"The Azure event hub name\")\n\n parser.add_argument(\"--interval\", type=int, required=False,\n help=\"The number of seconds to wait between sends. Defaults to 10 seconds.\")\n\n parser.add_argument(\"--what-if\", type=bool, required=False,\n help=\"Run the program without sending messages to the Event Hub. 
\"\n \"The app will log what would have been sent to the Event Hub.\")\n\n return parser", "def create_option_parser():\n from optparse import OptionParser\n usage='Usage: %prog [<options>] <bilingual file> <language tag 1> <language tag 2>'\n parser = OptionParser(usage=usage)\n\n parser.add_option(\n '-u', '--create-tuning',\n dest='tuning',\n help='Specify percentage of corpus to be used for tuning corpus.',\n default=0\n )\n parser.add_option(\n '-e', '--create-evaluation',\n dest='eval',\n help='Specify percentage of corpus to be used for tuning corpus.',\n default=0\n )\n return parser", "def create_argument_parser(cls):\n\n parser = super().create_argument_parser()\n\n # GitHub options\n group = parser.add_argument_group('GitHub arguments')\n\n group.add_argument(\"--owner\", required=True,\n help=\"GitHub owner\")\n group.add_argument(\"--repository\", required=True,\n help=\"GitHub repository\")\n group.add_argument(\"--sleep-for-rate\", dest='sleep_for_rate',\n action='store_true',\n help=\"sleep for getting more rate\")\n group.add_argument(\"--min-rate-to-sleep\", dest='min_rate_to_sleep',\n default=MIN_RATE_LIMIT, type=int,\n help=\"sleep until reset when the rate limit reaches this value\")\n\n return parser", "def initCmdLineParser():\n\n # Init parser and all general flags\n usage = \"usage: %prog [options] [--help]\"\n parser = OptionParser(usage=usage, version=\"0.1\")\n\n parser.add_option(\"-d\", \"--daemon\", action=\"store_true\", default=False, help=\"daemon mode\")\n parser.add_option(\"-c\", \"--config\", help=\"install config file\", default = 'test.conf')\n parser.add_option(\"-D\", \"--debug\", action=\"store_true\", help=\"debug mode\", default = False)\n\n parser.add_option(\"-a\", \"--add\", action=\"store_true\", help=\"add node to cluster\", default = False)\n parser.add_option(\"-p\", \"--port\", help= \"http server port\", default = '8999')\n\n\n return parser", "def define_command_line_options():\n \n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--T', '--stop_time', type=float, \n default=20.0, help='end time of simulation', \n metavar='t')\n parser.add_argument('--dt', type=float, default=0.1,\n help='timestep for the discrete apporoximation',\n metavar='dt')\n parser.add_argument('--v0', '--initial_condition', type=float,\n default=-0.0, help='initial condition v(0)',\n metavar='v0')\n parser.add_argument('--makeplot', action='store_true',\n help='display plot or not')\n parser.add_argument('--rho', type=float, default=1.0,\n help='air mass density', metavar='rho')\n parser.add_argument('--Cd', type=float, default=1.2,\n help='drag coefficient', metavar='Cd')\n parser.add_argument('--m', '--body_mass', type=float, default=100.,\n help='body mass', metavar='m')\n parser.add_argument('--A', type=float, default=0.5,\n help='body cross sectional area',\n metavar='A')\n parser.add_argument('--tp', type=float, default=-1,\n help='time of parachute deployment', metavar='tp')\n return parser", "def create_parser():\n parser = argparse.ArgumentParser(\n description=\"First example\",\n epilog=\"Batch 2017\")\n\n # script\n parser.add_argument('--script',\n required=True,\n action='store',\n dest='script',\n help=\"A script to execute\")\n\n parser.add_argument('--dataset',\n required=True,\n action='store',\n dest='dataset',\n help=\"A dataset to use\")\n#\n# parser.add_argument('--features',\n# required=True,\n# action='store',\n# dest='features',\n# help=\"Number of features\")\n return parser", "def init_parser():\n parser = 
OptionParser()\n parser.add_option(\"-n\", \"--interactive\", action=\"store_true\", help=\"run in interactive (non-daemon) mode\")\n parser.add_option(\"-r\", \"--run\", action=\"store_true\", help=\"starts process identified by -app parameter\")\n parser.add_option(\"-k\", \"--kill\", action=\"store_true\", help=\"kill process identified by -app parameter\")\n parser.add_option(\"-a\", \"--app\", action=\"store\", help=\"application to start (process name)\")\n parser.add_option(\"-q\", \"--query\", action=\"store_true\", help=\"query application's state\")\n parser.add_option(\"-i\", \"--install_ve\", action=\"store_true\", help=\"install a virtualenv for the runtime to use\")\n parser.add_option(\"-s\", \"--shell\", action=\"store_true\", help=\"run an ipython shell within the virtualenv\")\n parser.add_option(\"-t\", \"--tests\", action=\"store_true\", help=\"run tests\")\n parser.add_option(\"-x\", \"--xunit\", action=\"store_true\", help=\"run tests with coverage and xunit output for Jenkins\")\n parser.add_option(\"-z\", \"--analyze\", action=\"store_true\", help=\"run pylint on project\")\n parser.add_option(\"-l\", \"--list\", action=\"store_true\", help=\"list available applications\")\n parser.add_option(\"-o\", \"--outfile\", action=\"store\", help=\"save results from a report to a file\")\n return parser", "def build_parser():\n parser = argparse.ArgumentParser(description='The classic FizzBuzz game in programmatic form.', add_help=False)\n parser.add_argument('-h', '--help', default=argparse.SUPPRESS, action='help',\n help='Show this help message and exit.')\n parser.add_argument('-s', '--start', default=1, type=int, action='store', metavar='START',\n help='The number to start FizzBuzzing at (inclusive).')\n parser.add_argument('stop', type=int, action='store', metavar='STOP',\n help='The number to end FizzBuzzing at (exclusive).')\n return parser", "def setup_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-u\", \"--url\", dest='url', required=True,\n help=\"Falkonry Edge URL\")\n parser.add_argument(\"-i\", \"--input_file\", dest='input', required=True,\n help=\"Input data file to feed into Falkonry Edge Analyzer\")\n parser.add_argument(\"-o\", \"--output_file\", dest='output', required=True,\n help=\"File name to write Falkonry Edge Analyzer output\")\n parser.add_argument(\"-t\", \"--time_column\", dest='time', type=int, required=True,\n help=\"Time column index starting with 0\")\n parser.add_argument(\"-z\", \"--time_zone\", dest='zone', required=True,\n help=\"Time zone\")\n parser.add_argument(\"-f\", \"--time_format\", dest='format', required=True,\n help=\"Timestamp format\")\n parser.add_argument(\"-e\", \"--entity_column\", dest='entity', type=int,\n help=\"Entity column index starting with 0\")\n parser.add_argument(\"-b\", \"--batch_column\", dest='batch', type=int,\n help=\"Batch column index starting with 0\")\n parser.add_argument(\"-r\", \"--input_feed_rate\", dest='rate', type=int, default=1000,\n help=\"Number of records to send to edge per second.\")\n\n return parser", "def _make_argument_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(prog=\"pyrasaeco-render\", description=__doc__)\n subparsers = parser.add_subparsers(help=\"Commands\", dest=\"command\")\n subparsers.required = True\n\n once = subparsers.add_parser(\n \"once\", help=\"Render once the scenarios and the scenario ontology\"\n )\n\n continuously = subparsers.add_parser(\n \"continuously\",\n help=\"Re-render continuously the scenarios and the 
scenario ontology\",\n )\n\n continuously.add_argument(\n \"-p\",\n \"--port\",\n help=\"Port on which the demo server should listen to.\\n\\n\"\n \"If not specified, the demo server will not be started.\",\n type=int,\n )\n\n for command in [once, continuously]:\n command.add_argument(\n \"-s\",\n \"--scenarios_dir\",\n help=\"Directory where scenarios reside\\n\\n\"\n \"The rendering artefacts will be produced in-place in this directory.\",\n required=True,\n )\n\n return parser", "def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('webpage', help='webpage to search')\n\n return parser", "def mkOptionParser():\n \n usage = \"\"\"%prog <input.bed> <output.bed> <threshold>\n %prog filters out the lines that don't meet a certain threshold. \"\"\"\n\n parser = OptionParser(usage)\n \n\n return parser", "def _build_arg_parser():\n parser = argparse.ArgumentParser(\n description=_description,\n add_help=True,\n )\n add_generic_args(parser)\n add_diff_args(parser)\n add_filename_args(parser, [\"base\", \"remote\"])\n\n parser.add_argument(\n '-o', '--output',\n default=None,\n help=\"if supplied, the diff is written to this file. \"\n \"Otherwise it is printed to the terminal.\")\n\n return parser", "def make_arguments_parser():\n parser = ArgumentParser(description=__doc__, epilog=\"\"\"CSS-HTML-JS-Minify:\n Takes a file or folder full path string and process all CSS/HTML/JS found.\n If argument is not file/folder will fail. Check Updates works on Python3.\n Std-In to Std-Out is deprecated since it may fail with unicode characters.\n SHA1 HEX-Digest 11 Chars Hash on Filenames is used for Server Cache.\n CSS Properties are Alpha-Sorted, to help spot cloned ones, Selectors not.\n Watch works for whole folders, with minimum of ~60 Secs between runs.\"\"\")\n parser.add_argument('--version', action='version', version=__version__)\n parser.add_argument('fullpath', metavar='fullpath', type=str,\n help='Full path to local file or folder.')\n parser.add_argument('--wrap', action='store_true',\n help=\"Wrap output to ~80 chars per line, CSS only.\")\n parser.add_argument('--prefix', type=str,\n help=\"Prefix string to prepend on output filenames.\")\n parser.add_argument('--timestamp', action='store_true',\n help=\"Add a Time Stamp on all CSS/JS output files.\")\n parser.add_argument('--quiet', action='store_true', help=\"Quiet, Silent.\")\n parser.add_argument('--obfuscate', action='store_true',\n help=\"Obfuscate Javascript. JS only. 
(Recommended).\")\n parser.add_argument('--checkupdates', action='store_true',\n help=\"Check for updates from internet while running.\")\n parser.add_argument('--tests', action='store_true', help=\"Run Unit Tests.\")\n parser.add_argument('--hash', action='store_true',\n help=\"Add SHA1 HEX-Digest 11chars Hash to Filenames.\")\n parser.add_argument('--gzip', action='store_true',\n help=\"GZIP Minified files as '*.gz', CSS/JS only.\")\n parser.add_argument('--sort', action='store_true',\n help=\"Alphabetically Sort CSS Properties, CSS only.\")\n parser.add_argument('--comments', action='store_true',\n help=\"Keep comments, CSS/HTML only (Not Recommended)\")\n parser.add_argument('--overwrite', action='store_true',\n help=\"Force overwrite all in-place (Not Recommended)\")\n parser.add_argument('--after', type=str,\n help=\"Command to execute after run (Experimental).\")\n parser.add_argument('--before', type=str,\n help=\"Command to execute before run (Experimental).\")\n parser.add_argument('--watch', action='store_true', help=\"Watch changes.\")\n parser.add_argument('--multiple', action='store_true',\n help=\"Allow Multiple instances (Not Recommended).\")\n parser.add_argument('--_42', action='store_true')\n global args\n args = parser.parse_args()", "def get_parser():\n parser = argparse.ArgumentParser(description=\"Tweet Downloader\")\n parser.add_argument(\"-d\",\n \"--data\",\n dest=\"data\",\n help=\"Read data from file or display initial setting\",\n default=False)\n\n return parser", "def parse_args():\n parser = OptionParser()\n parser.add_option('--data-file', '-f', default='train_data.hdf5',\n help=\"The path to the data file\")\n parser.add_option('--runs-per-epoch', '-r', type='int',\n help=\"The number of runs per epoch (train samples count)\")\n parser.add_option('--avg-window-size', '-w', default='1', type='int',\n help=\"The window size for moving average\")\n\n (options, args) = parser.parse_args()\n return options", "def get_parser():\n parser = ArgumentParser(\n description=__doc__, formatter_class=ArgumentDefaultsHelpFormatter\n )\n parser.add_argument(\n \"-s\", \"--sentence\", dest=\"sentence\", help=\"sentence, splitted by ';'\"\n )\n return parser", "def mujoco_arg_parser():\n parser = arg_parser()\n parser.add_argument('--env', help='environment ID', type=str, default='Reacher-v2')\n parser.add_argument('--seed', help='RNG seed', type=int, default=0)\n parser.add_argument('--num-timesteps', type=int, default=int(1e6))\n parser.add_argument('--play', default=False, action='store_true')\n return parser", "def get_parser():\n module_parser = ArgumentParser(\n formatter_class=ArgumentDefaultsHelpFormatter)\n module_parser.add_argument(\"-i\", dest=\"data_path\", type=str,\n help=\"the location dataset\")\n module_parser.add_argument(\"-o\", dest=\"output_path\", type=str,\n help='base dir for outputs')\n module_parser.add_argument(\"-subdir\", dest=\"subdir\", type=str,\n choices=['test', 'train', 'val', 'all'],\n help='subdir: trn, test, val, or all ...')\n module_parser.add_argument(\"-n\", dest=\"n_train\", type=int,\n help='n: number of images for training')\n module_parser.add_argument(\"-Rx\", dest=\"x_res\", type=int,\n help='x resulution for final img')\n module_parser.add_argument(\"-Ry\", dest=\"y_res\", type=int,\n help='y resolution of final image')\n module_parser.add_argument(\"-d\", dest=\"d\",\n type=int,\n default=0,\n help='debug')\n return module_parser", "def create_arguments_parser():\n description = \"Statically analyse SBML files for modelling 
errors\"\n parent_arg_parser = rate_checker_sbml.create_arguments_parser()\n parser = argparse.ArgumentParser(description=description,\n parents=[parent_arg_parser])\n return parser", "def create_basic_parser(name=''):\n # SEE: https://docs.python.org/3/library/argparse.html\n parser = argparse.ArgumentParser('Benchmark on Image Registration - %s' % name)\n parser.add_argument('-n', '--name', type=str, required=False, default=None, help='custom experiment name')\n parser.add_argument('-t', '--path_table', type=str, required=True, help='path to the csv cover file')\n parser.add_argument(\n '-d',\n '--path_dataset',\n type=str,\n required=False,\n default=None,\n help='path to the dataset location, if missing in table'\n )\n parser.add_argument('-o', '--path_out', type=str, required=True, help='path to the output directory')\n parser.add_argument(\n '--unique', dest='unique', action='store_true', help='whether each experiment have unique time stamp'\n )\n parser.add_argument('--visual', dest='visual', action='store_true', help='whether visualise partial results')\n parser.add_argument(\n '-pproc',\n '--preprocessing',\n type=str,\n required=False,\n nargs='+',\n help='use some image pre-processing, the other matter',\n choices=['gray'] + ['matching-%s' % clr for clr in CONVERT_RGB]\n )\n # parser.add_argument('--lock_expt', dest='lock_thread', action='store_true',\n # help='whether lock to run experiment in single thread')\n parser.add_argument('--run_comp_benchmark', action='store_true', help='run computation benchmark on the end')\n parser.add_argument(\n '--nb_workers', type=int, required=False, default=1, help='number of registration running in parallel'\n )\n return parser", "def argument_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--exp', type=str, default=\"Iris\",\n choices=[\"Iris\", \"BreastCancer\", \"Cifar10\"])\n parser.add_argument('--setting', type=int, default=1,\n choices=[1, 2, 3])\n return parser.parse_args()", "def parse_options() -> Namespace:\n\n opt_parser = OptionParser(\n \"liftoff\",\n [\n \"script\",\n \"config_path\",\n \"procs_no\",\n \"gpus\",\n \"per_gpu\",\n \"no_detach\",\n \"verbose\",\n \"copy_to_clipboard\",\n \"time_limit\", # This should be removed in favour of start_by\n \"start_by\",\n \"end_by\",\n \"optimize\",\n \"args\",\n \"filters\",\n \"results_path\",\n \"name\",\n \"max_runs\",\n \"shuffle\",\n ],\n )\n return opt_parser.parse_args()", "def options():\n parser = ArgumentParser()\n logging = parser.add_argument_group(\"log\")\n logging.add_argument(\n \"--log\",\n dest=\"loglevel\",\n default=\"WARNING\",\n choices=[\"WARNING\", \"INFO\", \"DEBUG\", \"ERROR\"],\n help=\"Set the log level\",\n )\n monitoring = parser.add_argument_group(\"monitoring\")\n monitoring.add_argument(\n \"--monitoring\", action=\"store_true\", help=\"Set the monitoring\"\n )\n mpi = parser.add_argument_group(\"mpi splitting\")\n mpi.add_argument(\n \"-npx\",\n dest=\"npx\",\n default=1,\n type=int,\n help=\"Set the number of processes in x direction\",\n )\n mpi.add_argument(\n \"-npy\",\n dest=\"npy\",\n default=1,\n type=int,\n help=\"Set the number of processes in y direction\",\n )\n mpi.add_argument(\n \"-npz\",\n dest=\"npz\",\n default=1,\n type=int,\n help=\"Set the number of processes in z direction\",\n )\n args, _ = parser.parse_known_args()\n return args", "def make_parser():\n\n parser = argparse.ArgumentParser(add_help=True)\n\n parser_grp_main = parser.add_argument_group('Arguments')\n\n parser_grp_main.add_argument\n\n 
parser_grp_main.add_argument(\n \"-i\",\n \"--inp-dir\",\n default = \"out/ln/alias/sst/all_samples\",\n help=\"The folder containing files to tidy.\"\n )\n\n parser_grp_main.add_argument(\n \"-x\",\n \"--xlsx\",\n type=str,\n help=\"The xlsx file containing the metadata to use to find samples and tidy them.\",\n default=\"Sequencing_summary.xlsx\",\n required=False)\n\n parser_grp_main.add_argument(\n \"-b\",\n \"--by-column\",\n nargs='+',\n type=str,\n help=\"The column names from the xlsx file to use to tidy.\",\n default=\"sample_name\",\n required=False)\n \n parser_grp_main.add_argument(\n \"-d\",\n \"--delete\",\n help=\"Delete file only this arg is used. Unsafe. Always run first without this argument and check all files listed to deletion.\",\n default=False,\n type=bool,\n )\n\n return parser", "def parseArguments():\n parser = argparse.ArgumentParser(description='Tool run benchmarks and query database')\n parser.add_argument('--version', action=\"store_true\", dest=\"version\", default=False, help=\"Print version\")\n parser.add_argument(\"--query\", \"-q\", action=\"store_true\", dest=\"queryDataBase\", default=False, help=\"Query Data Base\")\n parser.add_argument(\"--performance\", \"-p\", action=\"store_true\", dest=\"queryPerformance\", default=False, help=\"Query Data Base - Performance Metrics\")\n parser.add_argument(\"--run\", \"-r\", action=\"store_true\", dest=\"runBenchmarks\", default=False, help=\"Run Benchmarks and store results in the DB\")\n args = parser.parse_args()\n return args", "def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', '--todir', help='destination directory for downloaded images')\n parser.add_argument('logfile', help='apache logfile to extract urls from')\n\n return parser", "def build_parser() -> ArgumentParser:\n parser = ArgumentParser(prog=\"bartender\")\n parser.add_argument(\"--version\", action=\"version\", version=version(\"bartender\"))\n subparsers = parser.add_subparsers(dest=\"subcommand\")\n\n serve_sp = subparsers.add_parser(\"serve\", help=\"Serve web service\")\n serve_sp.set_defaults(func=serve)\n\n perform_sp = subparsers.add_parser(\"perform\", help=\"Async Redis queue job worker\")\n perform_sp.add_argument(\n \"--config\",\n default=Path(\"config.yaml\"),\n type=Path,\n help=\"Configuration with schedulers that need arq workers\",\n )\n perform_sp.add_argument(\n \"--destination\",\n nargs=\"+\",\n help=\"\"\"Name of destinations to run workers for.\n Each destination must have `scheduler.type:arq`.\n By default runs workers for all destinations with `scheduler.type:arq`.\"\"\",\n dest=\"destination_names\",\n )\n perform_sp.set_defaults(func=perform)\n\n add_generate_token_subcommand(subparsers)\n\n return parser", "def _init_parser():\n\t\n\t_parser = argparse.ArgumentParser()\n\t_parser.add_argument(\"--pull\", help=\"pull scripts from UR3\", action=\"store_true\")\n\t_parser.add_argument(\"--create\", help=\"create data base from script files\", action=\"store_true\")\n\t_parser.add_argument(\"--clear\", help=\"clear all data base\", action=\"store_true\")\n\treturn _parser", "def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-d', '--todir', help='destination directory for downloaded images')\n parser.add_argument('logfile', help='apache logfile to extract urls from')\n\n return parser", "def init_parser():\n parser = argparse.ArgumentParser(\n description='Backup application code and data.')\n parser.add_argument('-a', '--app-id', required=True,\n 
help='the application ID to run the backup for')\n parser.add_argument('--source-code', action='store_true',\n default=False, help='backup the source code too. Disabled by default.')\n parser.add_argument('-d', '--debug', required=False, action=\"store_true\",\n default=False, help='display debug messages')\n parser.add_argument('--skip', required=False, nargs=\"+\",\n help='skip the following kinds, separated by spaces')\n\n return parser", "def get_parser(self):\n parser = argparse.ArgumentParser(description='Short sample app')\n\n parser.add_argument('-a', action=\"store_true\", default=False)\n parser.add_argument('-b', action=\"store\", dest=\"b\")\n parser.add_argument('-c', action=\"store\", dest=\"c\", type=int)\n return parser", "def parse_args():\n parser = ArgumentParser()\n parser.add_argument('-t', '--timer', action='store_true', \\\n help='Time the first random generation')\n parser.add_argument('-i', '--ibmq', default='', help='IBMQ token')\n parser.add_argument('-b', '--backend', default='', help='IBMQ backend')\n return parser.parse_args()", "def get_base_argument_parser(\n **kwargs\n) -> ArgumentParser:\n\n parser = ArgumentParser(\n allow_abbrev=False,\n add_help=False,\n **kwargs\n )\n\n parser.add_argument(\n '--help',\n action='store_true',\n help='Pass this flag to print usage and argument descriptions.'\n )\n\n parser.add_argument(\n '--log',\n choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],\n help='Logging level.'\n )\n\n return parser", "def parse_args():\n parser = argparse.ArgumentParser(description=\"Bandits algorithms on a click-through \"\n \"rate dataset.\")\n parser.add_argument('--plot', action='store_true')\n return parser.parse_args()", "def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', '--todir',\n help='destination directory for downloaded images')\n parser.add_argument('logfile', help='apache logfile to extract urls from')\n\n return parser", "def atari_arg_parser():\n parser = arg_parser()\n parser.add_argument('--env', help='environment ID', default='BreakoutNoFrameskip-v4')\n parser.add_argument('--seed', help='RNG seed', type=int, default=0)\n parser.add_argument('--num-timesteps', type=int, default=int(10e6))\n return parser", "def _create_argument_parser():\n\n parser = argparse.ArgumentParser(\n description=\"Execute a CPAchecker run in the VerifierCloud using the web interface.\"\n + \" Command-line parameters can additionally be read from a file if file name prefixed with '@' is given as argument.\",\n fromfile_prefix_chars=\"@\",\n add_help=False, # conflicts with -heap\n )\n\n parser.add_argument(\"-h\", \"--help\", action=\"help\", help=\"Prints this help.\")\n\n parser.add_argument(\n \"--cloudMaster\",\n dest=\"cloud_master\",\n default=\"https://vcloud.sosy-lab.org/cpachecker/webclient/\",\n metavar=\"HOST\",\n help=\"Sets the webclient host of the VerifierCloud instance to be used.\",\n )\n\n parser.add_argument(\n \"--cloudPriority\",\n dest=\"cloud_priority\",\n metavar=\"PRIORITY\",\n help=\"Sets the priority for this benchmark used in the VerifierCloud. 
Possible values are IDLE, LOW, HIGH, URGENT.\",\n )\n\n parser.add_argument(\n \"--cloudCPUModel\",\n dest=\"cpu_model\",\n type=str,\n default=None,\n metavar=\"CPU_MODEL\",\n help=\"Only execute runs in the VerifierCloud on CPU models that contain the given string.\",\n )\n\n parser.add_argument(\n \"--cloudUser\",\n dest=\"cloud_user\",\n metavar=\"USER:PWD\",\n help=\"The user and password for the VerifierCloud.\",\n )\n\n parser.add_argument(\n \"--revision\",\n dest=\"revision\",\n metavar=\"BRANCH:REVISION\",\n help=\"The svn revision of CPAchecker to use.\",\n )\n\n parser.add_argument(\n \"-d\", \"--debug\", action=\"store_true\", help=\"Enable debug output\"\n )\n\n parser.add_argument(\n \"-o\",\n \"--outputpath\",\n dest=\"output_path\",\n type=str,\n default=DEFAULT_OUTPUT_PATH,\n help=\"Output prefix for the generated results. \"\n + \"If the path is a folder files are put into it,\"\n + \"otherwise it is used as a prefix for the resulting files.\",\n )\n parser.add_argument(\n \"--resultFilePattern\",\n dest=\"result_file_pattern\",\n type=str,\n default=\"**\",\n help=\"Only files matching this glob pattern are transported back to the client.\",\n )\n\n parser.add_argument(\n \"-T\",\n \"--timelimit\",\n dest=\"timelimit\",\n default=None,\n type=util.parse_timespan_value,\n help=\"Time limit in seconds\",\n metavar=\"SECONDS\",\n )\n\n parser.add_argument(\n \"-M\",\n \"--memorylimit\",\n dest=\"memorylimit\",\n default=None,\n type=util.parse_memory_value,\n help=\"Memory limit\",\n metavar=\"BYTES\",\n )\n\n parser.add_argument(\n \"-c\",\n \"--corelimit\",\n dest=\"corelimit\",\n type=int,\n default=None,\n metavar=\"N\",\n help=\"Limit the tool to N CPU cores.\",\n )\n\n parser.add_argument(\n \"--version\", action=\"version\", version=\"%(prog)s \" + __version__\n )\n return parser", "def get_argparser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--fast', action='store_true', help='Run on extremely reduced dataset')\n parser.add_argument('--seed', type=int, default=42, help='Random seed')\n parser.add_argument('--split-seed', type=int, default=1234, help='Random seed for train/val split')\n parser.add_argument('-a', '--augmentation', default='none', help='Augmentation used. Possible values: hard, medium, light, safe, none')\n parser.add_argument('-abn', '--abn', default='default', help='Use of activate + batch_norm block. Values: default, inplace, inplace_sync')\n parser.add_argument('-b', '--batch-size', type=int, default=32, help='Batch Size during training, e.g. -b 64')\n parser.add_argument('-bm', '--border-mode', type=str, default='reflect', help='Border mode. 
Either constant|reflect')\n parser.add_argument('-d', '--dataset', type=str, default='image_only', help='image_only, image_depth, image_cumsum, image_depth_cumsum')\n parser.add_argument('-de', '--drop-empty', action='store_true')\n parser.add_argument('-df', '--drop-few', default=None, type=int)\n parser.add_argument('-dv', '--drop-vstrips', action='store_true')\n parser.add_argument('-e', '--epochs', type=int, default=150, help='Epoch to run')\n parser.add_argument('-es', '--early-stopping', type=int, default=None, help='Maximum number of epochs without improvement')\n parser.add_argument('-f', '--fold', default=None, type=int, help='Fold to train')\n parser.add_argument('-fe', '--freeze-encoder', type=int, default=0, help='Freeze encoder parameters for N epochs')\n parser.add_argument('-fm', '--fix-masks', action='store_true')\n parser.add_argument('-ft', '--fine-tune', action='store_true')\n parser.add_argument('-l', '--loss', type=str, default='bce', help='Loss (lovasz, bce_iou)')\n parser.add_argument('-lr', '--learning-rate', type=float, default=1e-3, help='Initial learning rate')\n parser.add_argument('-lrs', '--lr-scheduler', default=None, help='LR scheduler')\n parser.add_argument('-m', '--model', required=True, type=str, help='Name of the model')\n parser.add_argument('-multi-gpu', '--multi-gpu', action='store_true')\n parser.add_argument('-nc', '--num-classes', default=1, type=int, help='Run on extremely reduced dataset')\n parser.add_argument('-nd', '--no-dropout', action='store_true', help='Disable dropout (if model has it)')\n parser.add_argument('-npt', '--no-pretrain', action='store_true', help='Disables use of pretrain weights for encoders')\n parser.add_argument('-o', '--optimizer', default='Adam', help='Name of the optimizer')\n parser.add_argument('-p', '--prepare', type=str, default='128', help='Possible tile preparations (128, 128pad, 224, 224pad, 256, 256pad)')\n parser.add_argument('-r', '--resume', type=str, default=None, help='Checkpoint filename to resume')\n parser.add_argument('-re', '--restart-every', type=int, default=-1, help='Restart optimizer every N epochs')\n parser.add_argument('-s', '--stratify', default=None, type=str, help='Stratification class. 
One of: coverage, depth')\n parser.add_argument('-tm', '--target-metric', type=str, default='val_lb', help='Target metric to use for storing snapshots')\n parser.add_argument('-w', '--workers', default=0, type=int, help='Num workers')\n parser.add_argument('-wd', '--weight-decay', type=float, default=0, help='L2 weight decay')\n parser.add_argument('-x', '--experiment', type=str, help='Name of the experiment')\n\n return parser", "def getArgumentParser():\n parser = argparse.ArgumentParser(description=\"Script for running optimization for the ZH dark photon SR\")\n parser.add_argument('-i',\n '--infile',\n dest='infile',\n help='Input CSV file',\n default = '/afs/cern.ch/user/e/ehofgard/public/data/all_data')\n parser.add_argument('-o',\n '--output',\n dest='outdir',\n help='Output directory for plots, selection lists, etc',\n default='outdir')\n \n return parser", "def setup_args() -> argparse.ArgumentParser:\n main_parser = argparse.ArgumentParser(prog=\"gh\")\n subparsers = main_parser.add_subparsers(dest=\"subparser\")\n command_parser = subparsers.add_parser(\"commands\", help=\"Runs a command\")\n command_parser.add_argument(\n \"choice\",\n help=\"The chosen command to run\",\n choices=gh.commands.OPTIONS.keys(),\n )\n analytics_parser = subparsers.add_parser(\"analytics\", help=\"Runs an analysis\")\n analytics_parser.add_argument(\n \"choice\",\n help=\"The chosen analysis to run\",\n choices=gh.analytics.OPTIONS.keys(),\n )\n return main_parser", "def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--getErrors\",\n type=str,\n default=None,\n help=\"get error messages - send \\'yes\\' \")\n parser.add_argument(\"--host\",\n type=str,\n default=\"localhost\",\n help=\"Host of redis. Default : localhost\")\n parser.add_argument(\"--port\",\n type=int,\n default=6379,\n help=\"Port of redis. Default : 6379\")\n parser.add_argument(\"--db\",\n type=int,\n default=0,\n help=\"Db of redis. 
Default : 0\")\n parser.add_argument(\"--cleanTemp\",\n type=str,\n default=None,\n help=\"clean trash files from db - send \\'yes\\' \")\n return parser", "def init_parser():\n parser = OptionParser()\n\n parser.add_option(\n \"-d\",\n \"--debug\",\n dest=\"debug\",\n help=\"Toggle debugging\",\n action=\"store_true\",\n default=False,\n )\n\n parser.add_option(\n \"-f\",\n \"--questions-file\",\n dest=\"file\",\n help=(\"Use this file instead of the default \"\n \"questions.yaml\"),\n metavar=\"FILE\",\n )\n\n parser.add_option(\n \"-p\",\n \"--generate-pdf\",\n dest=\"pdf\",\n help=(\"Generate the speaker PDF\"),\n action=\"store_true\",\n default=False,\n )\n\n parser.add_option(\n \"-v\",\n \"--version\",\n dest=\"version\",\n help=\"Show program version\",\n action=\"store_true\",\n default=False,\n )\n\n options = parser.parse_args()[0]\n return options", "def parse_arguments():\n\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('benchmark', type=str, help='Benchmark file.')\n parser.add_argument('-m', '--model', type=str, help='Model file.')\n parser.add_argument(\n '-o', '--output', type=str, default='browser', help='Output style (browser/html).')\n parser.add_argument('-c', '--cache', type=str, help='Load specified cache file.')\n parser.add_argument('-s', '--save', type=str, help='Store results as csv table.')\n parser.add_argument(\n '-cn', '--classname', type=str,\n help='Load a specific class from a folder containing multiple classes.')\n\n args = vars(parser.parse_args())\n\n if not args['model'] and not args['benchmark']:\n print('ERROR: Must specify either model or benchmark.')\n parser.print_help()\n sys.exit(99)\n\n return args", "def create_argument_parser() -> argparse.ArgumentParser:\n\n parser = argparse.ArgumentParser(\n prog=\"mafiabot\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=\"Mafia Telegram Bot command line interface.\",\n )\n\n # parser.add_argument(\n # \"--argument\",\n # action=\"store_true\",\n # default=,\n # help=\"\",\n # )\n\n add_logging_options(parser)\n\n return parser", "def get_parser():\n p = argparse.ArgumentParser(description='such a good program')\n p.add_argument('infile')\n p.add_argument('outfile')\n return p", "def _setup_parser():\n parser = argparse.ArgumentParser(add_help=True)\n parser.add_argument('--eval_model', type=str, default=None)\n parser.add_argument('--stack', type=int, default=1)\n parser.add_argument('--flare', action='store_true')\n parser.add_argument('--mixreg', action='store_true')\n\n env_group = parser.add_argument_group(\"Env Args\")\n env_group.add_argument('--env_name', type=str, default=ENV_NAME)\n env_group.add_argument('--num_envs', type=int, default=NUM_ENVS)\n env_group.add_argument('--num_levels', type=int, default=NUM_LEVELS)\n env_group.add_argument('--start_level', type=int, default=START_LEVEL)\n\n agent_group = parser.add_argument_group(\"Agent Args\")\n PPOAgent.add_to_argparse(agent_group)\n\n model_group = parser.add_argument_group(\"Model Args\")\n ImpalaPPO.add_to_argparse(model_group)\n\n return parser", "def setup_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(description='evaluate dynamic shielding with water_tank benchmarks')\n\n parser.add_argument('--steps', type=int, default=int(2e5),\n help='number of steps that each environment is run.')\n parser.add_argument('--learning-rate', type=float, default=1e-3,\n help='learning rate')\n parser.add_argument('--shield', type=str, default='pre-adaptive',\n help='the 
shield to be used [pre-adaptive (default) | pre-dynamic | safe-padding | '\n 'post-dynamic | no]')\n parser.add_argument('--shield-life', type=int, default=100,\n help='frequency of shield reconstruction in terms of episodes.')\n parser.add_argument('--depths', nargs='+', default=[1, 3, 5, 7],\n help='a list of min-depths for dynamic shield (usage: --depths 0 1 3)')\n parser.add_argument('--penalties', nargs='+', default=[0.0, 1.0, 10.0, 100.0],\n help='a list of penalties that it is used in no shield (usage: --penalties 0.0 1.0 100.0)')\n return parser", "def create_arg_parser():\n arg_parser = argparse.ArgumentParser()\n arg_parser.add_argument(\n '-f',\n '--file',\n required=True,\n help='Name of clean data file'\n )\n return arg_parser", "def get_test_parser():\n parser = argparse.ArgumentParser(description='Acceptability Test')\n\n parser.add_argument(\"-mf\", \"--model_file\", type=str, help=\"Model file to load\")\n parser.add_argument(\"-vf\", \"--vocab_file\", type=str, help=\"Vocab file to load\")\n parser.add_argument(\"-ef\", \"--embedding_file\", type=str, help=\"Embedding file to load\")\n parser.add_argument(\"-o\", \"--output_file\", type=str, help=\"Output file for model classifications.\")\n parser.add_argument(\"-d\", \"--dataset_path\", type=str, help=\"Test file\")\n parser.add_argument(\"-s\", \"--seed\", type=int, default=11111, help=\"Random seed\")\n parser.add_argument(\"-g\", \"--gpu\", action=\"store_true\", default=False, help=\"Use GPU\")\n parser.add_argument(\"--glove\", action=\"store_true\", default=False,\n help=\"Whether to use GloVE embeddings for models\")\n parser.add_argument(\"-e\", \"--embedding\", type=str, default=\"glove.840B.300d\",\n help=\"Embedding type to be used, select from\" +\n \"http://torchtext.readthedocs.io/en/latest/vocab.html#pretrained-aliases\")\n\n # Preprocess arguments\n parser.add_argument(\"--should_not_preprocess_data\", action=\"store_true\", default=False,\n help=\"Whether to preprocess data? Default: true (Will preprocess)\")\n parser.add_argument(\"--should_not_lowercase\", action=\"store_true\", default=False,\n help=\"Should lowercase data? 
Default: true (Will lowercase)\")\n parser.add_argument(\"--preprocess_tokenizer\", default='space', type=str,\n help=\"Type of tokenizer to use (space|nltk)\")\n parser.add_argument(\"-cp\", \"--crop_pad_length\", type=int, default=30,\n help=\"Padding Crop length\")\n return parser", "def parse_arguments():\n parser = argparse.ArgumentParser(prog='AdapterRunner', description='Adapter Runner Application')\n parser.add_argument('-a', '--application', action='store', dest='app_name', help='Application Name',\n metavar='<application_name>')\n parser.add_argument('-fi', '--fetch_interval', action='store', dest='fetch_stats_interval', help='Fetch Stats Interval',\n metavar='<fetch_interval in seconds>')\n return parser.parse_args()", "def parser():\n parser = ArgumentParser()\n parser.add_argument('dir_jsons', help='dir containing json files')\n parser.add_argument('dir_out', help='output directory')\n parser.add_argument('file_name', help='name of HTML file')\n return parser", "def arg_parser():\n import argparse\n return argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)", "def build_argparser():\n parser = ArgumentParser()\n parser.add_argument(\"-m\", \"--model\", required=True, type=str,\n help=\"Path to an xml file with a trained model.\")\n parser.add_argument(\"-i\", \"--input\", required=True, type=str,\n help=\"Path to image or video file\")\n parser.add_argument(\"-l\", \"--cpu_extension\", required=False, type=str,\n default=None,\n help=\"MKLDNN (CPU)-targeted custom layers.\"\n \"Absolute path to a shared library with the\"\n \"kernels impl.\")\n parser.add_argument(\"-d\", \"--device\", type=str, default=\"CPU\",\n help=\"Specify the target device to infer on: \"\n \"CPU, GPU, FPGA or MYRIAD is acceptable. Sample \"\n \"will look for a suitable plugin for device \"\n \"specified (CPU by default)\")\n parser.add_argument(\"-pt\", \"--prob_threshold\", type=float, default=0.3,\n help=\"Probability threshold for detections filtering\"\n \"(0.3 by default)\")\n return parser", "def build_argparser():\n parser = ArgumentParser()\n parser.add_argument(\"-m\", \"--model\", required=True, type=str,\n help=\"Path to an xml file with a trained model.\")\n parser.add_argument(\"-i\", \"--input\", required=True, type=str,\n help=\"Path to image or video file\")\n parser.add_argument(\"-l\", \"--cpu_extension\", required=False, type=str,\n default=None,\n help=\"MKLDNN (CPU)-targeted custom layers.\"\n \"Absolute path to a shared library with the\"\n \"kernels impl.\")\n parser.add_argument(\"-d\", \"--device\", type=str, default=\"CPU\",\n help=\"Specify the target device to infer on: \"\n \"CPU, GPU, FPGA or MYRIAD is acceptable. 
Sample \"\n \"will look for a suitable plugin for device \"\n \"specified (CPU by default)\")\n parser.add_argument(\"-pt\", \"--prob_threshold\", type=float, default=0.5,\n help=\"Probability threshold for detections filtering\"\n \"(0.5 by default)\")\n return parser", "def build_argparser():\n parser = ArgumentParser()\n parser.add_argument(\"-m\", \"--model\", required=True, type=str,\n help=\"Path to an xml file with a trained model.\")\n parser.add_argument(\"-i\", \"--input\", required=True, type=str,\n help=\"Path to image or video file\")\n parser.add_argument(\"-l\", \"--cpu_extension\", required=False, type=str,\n default=None,\n help=\"MKLDNN (CPU)-targeted custom layers.\"\n \"Absolute path to a shared library with the\"\n \"kernels impl.\")\n parser.add_argument(\"-d\", \"--device\", type=str, default=\"CPU\",\n help=\"Specify the target device to infer on: \"\n \"CPU, GPU, FPGA or MYRIAD is acceptable. Sample \"\n \"will look for a suitable plugin for device \"\n \"specified (CPU by default)\")\n parser.add_argument(\"-pt\", \"--prob_threshold\", type=float, default=0.5,\n help=\"Probability threshold for detections filtering\"\n \"(0.5 by default)\")\n return parser", "def build_argparser():\n parser = ArgumentParser()\n parser.add_argument(\"-m\", \"--model\", required=True, type=str,\n help=\"Path to an xml file with a trained model.\")\n parser.add_argument(\"-i\", \"--input\", required=True, type=str,\n help=\"Path to image or video file\")\n parser.add_argument(\"-l\", \"--cpu_extension\", required=False, type=str,\n default=None,\n help=\"MKLDNN (CPU)-targeted custom layers.\"\n \"Absolute path to a shared library with the\"\n \"kernels impl.\")\n parser.add_argument(\"-d\", \"--device\", type=str, default=\"CPU\",\n help=\"Specify the target device to infer on: \"\n \"CPU, GPU, FPGA or MYRIAD is acceptable. 
Sample \"\n \"will look for a suitable plugin for device \"\n \"specified (CPU by default)\")\n parser.add_argument(\"-pt\", \"--prob_threshold\", type=float, default=0.5,\n help=\"Probability threshold for detections filtering\"\n \"(0.5 by default)\")\n return parser", "def __create_parser_arguments(parser: argparse.ArgumentParser):\n parser.add_argument('-p', '--number-processes', type=int, default=4,\n help='specify the number of processes used')\n parser.add_argument('-s', '--measurement-strategy', type=str,\n default=MeasurementStrategy.classic.value,\n choices=(MeasurementStrategy.classic.aliases() +\n MeasurementStrategy.anticipated.aliases() +\n MeasurementStrategy.aggressive.aliases() +\n MeasurementStrategy.forced.aliases()),\n help='''\nThe measurement strategy:\n\n- anticipated and aggressive: are basically the same and perform measurements even though a measurement resulting in a not reachable target exists\n- classic: does not perform a measurement in the above case\n- forced: always perform a measurement\n ''')\n parser.add_argument('-n', '--domain-block-limit', type=int, default=1000,\n help='The number of domains taken per block to process them')\n parser.add_argument('-q', '--ripe-request-limit', type=int,\n help='How many request should normally be allowed per second '\n 'to the ripe server', default=25)\n parser.add_argument('-b', '--ripe-request-burst-limit', type=int,\n help='How many request should at maximum be allowed per second'\n ' to the ripe server', default=40)\n parser.add_argument('-ml', '--measurement-limit', type=int,\n help='The amount of parallel RIPE Atlas measurements allowed',\n default=100)\n parser.add_argument('-ak', '--api-key', type=str,\n help='The RIPE Atlas Api key',\n default='1dc0b3c2-5e97-4a87-8864-0e5a19374e60')\n parser.add_argument('--bill-to', type=str,\n help='The RIPE Atlas Bill to address')\n parser.add_argument('-o', '--without-new-measurements', action='store_true',\n help='Evaluate the matches using only data/measurements already available '\n 'locally and remote')\n parser.add_argument('-ma', '--allowed-measurement-age', type=int, default=30*24*60*60,\n help='The allowed measurement age in seconds (Default 30 days)')\n parser.add_argument('-bt', '--buffer-time', type=float, default=constants.DEFAULT_BUFFER_TIME,\n help='The assumed amount of time spent in router buffers')\n parser.add_argument('-mp', '--measurement-packets', type=int, default=1,\n help='Amount of packets per measurement')\n parser.add_argument('-e', '--use-efficient-probes', action='store_true',\n help='sort probes after second hop latency and use the most efficient ones')\n parser.add_argument('-mt', '--probes-per-measurement', default=1, type=int,\n help='Maximum amount of probes used per measurement')\n parser.add_argument('-dpf', '--disable-probe-fetching', action='store_true',\n help='Debug argument to prevent getting ripe probes')\n parser.add_argument('--include-ip-encoded', action='store_true',\n help='Search also domains of type IP encoded')\n parser.add_argument('--stop-without-old-results', action='store_true',\n help='Do not measure for domains if there is no existing measurement')\n parser.add_argument('--endless-measurements', action='store_true',\n help='Should the list of IPs be reapeatedly scanned until the process is '\n 'closed')\n parser.add_argument('--random-domains', action='store_true',\n help='Select the domains to measure randomly')\n parser.add_argument('--debug', action='store_true', help='Use only one process and one thread')\n 
parser.add_argument('-l', '--log-file', type=str, default='check_locations.log',\n help='Specify a logging file where the log should be saved')\n parser.add_argument('-ll', '--log-level', type=str, default='INFO',\n choices=['NOTSET', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],\n help='Set the preferred log level')\n parser.add_argument('-dbn', '--database-name', type=str, default='hloc-measurements')\n parser.add_argument('--ip-filter-file', type=str,\n help='The file with the IPs which should be validated. '\n 'Only IPs which also have a domain entry in the database are '\n 'considered')", "def make_parser():\n parser_ = argparse.ArgumentParser(\n description=\"\"\"\n A tool to retrieve history from\n (almost) any browser on (almost) any platform\n\n██████╗ ██████╗ ██████╗ ██╗ ██╗███████╗███████╗██████╗ ██╗ ██╗██╗███████╗████████╗ ██████╗ ██████╗ ██╗ ██╗\n██╔══██╗██╔══██╗██╔═══██╗██║ ██║██╔════╝██╔════╝██╔══██╗ ██║ ██║██║██╔════╝╚══██╔══╝██╔═══██╗██╔══██╗╚██╗ ██╔╝\n██████╔╝██████╔╝██║ ██║██║ █╗ ██║███████╗█████╗ ██████╔╝█████╗███████║██║███████╗ ██║ ██║ ██║██████╔╝ ╚████╔╝\n██╔══██╗██╔══██╗██║ ██║██║███╗██║╚════██║██╔══╝ ██╔══██╗╚════╝██╔══██║██║╚════██║ ██║ ██║ ██║██╔══██╗ ╚██╔╝\n██████╔╝██║ ██║╚██████╔╝╚███╔███╔╝███████║███████╗██║ ██║ ██║ ██║██║███████║ ██║ ╚██████╔╝██║ ██║ ██║\n╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚══╝╚══╝ ╚══════╝╚══════╝╚═╝ ╚═╝ ╚═╝ ╚═╝╚═╝╚══════╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝ ╚═╝\n \"\"\", # noqa: E501\n epilog=\"\"\"\n Checkout the GitHub repo\n https://github.com/pesos/browser-history\n if you have any issues or want to help contribute\"\"\",\n formatter_class=RawDescriptionHelpFormatter,\n )\n\n parser_.add_argument(\n \"-t\",\n \"--type\",\n default=\"history\",\n help=f\"\"\"\n argument to decide whether to retrieve history or bookmarks.\n Should be one of {AVAILABLE_TYPES}.\n Default is history.\"\"\",\n )\n parser_.add_argument(\n \"-b\",\n \"--browser\",\n default=\"all\",\n help=f\"\"\"\n browser to retrieve history or bookmarks from. Should be one\n of all, default, {AVAILABLE_BROWSERS}.\n Default is all (gets history or bookmarks from all browsers).\n \"\"\",\n )\n\n parser_.add_argument(\n \"-f\",\n \"--format\",\n default=\"infer\",\n help=f\"\"\"\n Format to be used in output. Should be one of {AVAILABLE_FORMATS}.\n Default is infer (format is inferred from the output file's\n extension. If no output file (-o) is specified, it defaults to csv)\"\"\",\n )\n\n parser_.add_argument(\n \"-o\",\n \"--output\",\n default=None,\n help=\"\"\"\n File where history output or bookmark output is to be written.\n If not provided, standard output is used.\"\"\",\n )\n\n parser_.add_argument(\n \"-p\",\n \"--profile\",\n default=None,\n help=\"\"\"\n Specify the profile from which to fetch history or bookmarks. If\n not provided all profiles are fetched\n \"\"\",\n )\n\n parser_.add_argument(\n \"--show-profiles\",\n default=None,\n metavar=\"BROWSER\",\n help=f\"\"\"\n List all available profiles for a given browser where browser\n can be one of default, {AVAILABLE_BROWSERS}. 
The browser\n must always be provided.\n \"\"\",\n )\n\n parser_.add_argument(\n \"-v\", \"--version\", action=\"version\", version=\"%(prog)s \" + __version__\n )\n\n return parser_", "def make_parser():\n parser = argparse.ArgumentParser(description=config.DESCRIPTION)\n parser.add_argument('url_file', metavar='URL_FILE', type=str,\n help=config.HELP_URL_FILE)\n parser.add_argument('-d', metavar='DEST_DIR', dest='destination_dir', default=config.DEFAULT_DESTINATION_DIR, type=str,\n help=config.HELP_DESTINATION_DIR)\n parser.add_argument('-l', metavar='LOG_FILE', dest='log_file', default=config.DEFAULT_LOG_FILE, type=str,\n help=config.HELP_LOG_FILE % config.DEFAULT_LOG_FILE)\n\n return parser", "def build_parser(self, parser: ArgumentParser) -> None:", "def create_arg_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-a', '--path_annots', type=str, required=False,\n help='path to folder with annotations',\n default='annotations')\n parser.add_argument('-i', '--path_dataset', type=str, required=False,\n help='path to folder with dataset (images)',\n default='dataset')\n parser.add_argument('-o', '--path_output', type=str, required=False,\n help='path to the output directory - visualisation',\n default='output')\n parser.add_argument('--consensus', type=str, required=False,\n help='method for consensus landmarks',\n choices=['mean', 'median'], default='mean')\n parser.add_argument('--visual', required=False, action='store_true',\n help='export co-annotation visualisation', default=False)\n parser.add_argument('--nb_jobs', type=int, required=False,\n help='number of processes in parallel',\n default=NB_THREADS)\n return parser", "def parse_args():\n parser = argparse.ArgumentParser(description=\"SLOWFAST for AVA Dataset\")\n parser.add_argument(\"--pipeline\", type=str,\n default=\"../data/config/slowfast.pipeline\", help=\"SDK infer pipeline\")\n parser.add_argument(\"--data_dir\", type=str, default=\"../data/input\",\n help=\"Dataset contain frames and ava_annotations\")\n args_opt = parser.parse_args()\n return args_opt", "def _create_parser(self):\n parser = argparse.ArgumentParser(\n description=description,\n formatter_class=argparse.RawTextHelpFormatter\n )\n\n parser.add_argument(\n '-v',\n '--verbose',\n action='store_true',\n default=False,\n help='Verbose mode (turn on logging.info)')\n\n parser.add_argument(\n '-d',\n '--debug',\n action='store_true',\n default=False,\n help='Debug (turn on logging.debug)')\n\n return parser", "def __init__(self, argparser):\n super().__init__()\n argparser.add_argument(\n \"-b\", \"--config-seed\", dest=\"config_seed\",\n help=\"configuration seed/blob\",\n type=str, default=constants.DEFAULT_CONFIG_SEED\n )\n argparser.add_argument(\n \"-e\", \"--config-variable\", dest=\"config_variable\",\n help=\"name of environment variable with config\",\n type=str, default=constants.DEFAULT_CONFIG_ENV_KEY\n )\n argparser.add_argument(\n \"-c\", \"--config-file\", dest=\"config_file\",\n help=\"path to config file\",\n type=str, default=constants.DEFAULT_CONFIG_PATH\n )\n argparser.add_argument(\n \"-s\", \"--suite\", dest=\"suite\",\n help=\"test suite to run\",\n type=str\n )\n argparser.add_argument(\n \"-l\", \"--list-suites\", dest=\"list_suites\",\n help=\"list available test suites\",\n action=\"store_true\"\n )", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('n_iter',\n help='number of iteration',\n type=int)\n parser.add_argument('n_processes',\n help='number of processes',\n type=int)\n 
parser.add_argument('method',\n help='mutual exclusion method')\n parser.add_argument('duration',\n help='Duration of each process',\n type=float)\n return parser.parse_args()", "def build_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-r', '--reference', required=True, help=\"Reference Genome URL\")\n parser.add_argument('-n', '--normal', required=True, help='Normal BAM URL. Format: UUID.normal.bam')\n parser.add_argument('-t', '--tumor', required=True, help='Tumor BAM URL. Format: UUID.tumor.bam')\n parser.add_argument('-d', '--dbsnp', required=True, help='dbsnp_132_b37.leftAligned.vcf URL')\n parser.add_argument('-c', '--cosmic', required=True, help='b37_cosmic_v54_120711.vcf URL')\n parser.add_argument('-u', '--mutect', required=True, help='Mutect.jar')\n parser.add_argument('-w', '--work_dir', required=True, help='Where you wanna work from? (full path please)')\n\n return parser" ]
[ "0.7983817", "0.7168031", "0.70897955", "0.70692337", "0.7047852", "0.6983982", "0.693783", "0.6929585", "0.6902446", "0.689302", "0.6882429", "0.6879765", "0.68780637", "0.68780637", "0.6868575", "0.6855423", "0.68533826", "0.6851654", "0.68308103", "0.6825723", "0.678807", "0.67698914", "0.67691976", "0.6765229", "0.67618215", "0.6760568", "0.67495626", "0.6745742", "0.67241704", "0.672046", "0.671869", "0.6708479", "0.66855645", "0.6676791", "0.66766006", "0.6672552", "0.6655742", "0.6654492", "0.6653579", "0.66431683", "0.66429704", "0.6628851", "0.6624113", "0.66124886", "0.66110605", "0.65974575", "0.65808773", "0.6580097", "0.657554", "0.65731055", "0.6569114", "0.65639013", "0.6560347", "0.65584993", "0.6552183", "0.654758", "0.6546959", "0.6544487", "0.654338", "0.65433127", "0.65420073", "0.6539945", "0.6530379", "0.6529142", "0.65286773", "0.6526408", "0.65257144", "0.65239733", "0.65235215", "0.6519068", "0.651699", "0.6516296", "0.6510656", "0.65072924", "0.64940786", "0.6494037", "0.649145", "0.64864844", "0.6485738", "0.64771837", "0.64735454", "0.6469035", "0.6466766", "0.64649457", "0.64567584", "0.644985", "0.644877", "0.6448607", "0.6448607", "0.6448607", "0.64485735", "0.64458084", "0.64428544", "0.64396626", "0.643937", "0.643735", "0.64344716", "0.64343023", "0.64336747", "0.6428148" ]
0.7317018
1
Implement the command line entrypoint for benchmarks.
def main(cls):
    parser = cls.make_argument_parser()
    args = parser.parse_args()
    args.device = make_hoomd_device(args)
    benchmark = cls(**vars(args))
    performance = benchmark.execute()
    if args.device.communicator.rank == 0:
        print(f'{numpy.mean(performance)}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n logging.basicConfig(level=\"INFO\")\n assert len(sys.argv) == 2, \"Exactly one positional argument (path to the raw dataset) is \"\\\n \"needed. \\n\\nE.g. `python sparsity_benchmark ~/bff_data/final_table`\"\n\n # Prepares data for the benchmark, may take a while\n data_parameters = DATA_PARAMETERS.copy()\n data_parameters[\"input_file\"] = sys.argv[1]\n data_parameters[\"preprocessed_file\"] = os.path.join(\n os.path.dirname(data_parameters[\"input_file\"]),\n \"preprocessed_dataset.pkl\"\n )\n data_preprocessor = preprocess_dataset(data_parameters=data_parameters)\n\n # Note: the features here should be in range [0, ~1.2], according to the original experiments.\n # 0 corresponds to no data, everything else is linearly scaled from dB units.\n features, _ = data_preprocessor.load_dataset()\n\n logging.info(\"Starting benchmarks\")\n noisy_features = benchmark_noise(\n features=features,\n data_parameters=data_parameters,\n experiment_parameters=EXPERIMENT_PARAMETERS\n )\n benchmark_binarization(\n noisy_features=noisy_features,\n data_parameters=data_parameters,\n experiment_parameters=EXPERIMENT_PARAMETERS\n )\n logging.info(\"Done\")", "def main():\n parser = optparse.OptionParser()\n parser.add_option('--debug', action='store_true', default=False,\n help='run in debug mode')\n parser.add_option('-i', '--iteration', type=int, default=DEFAULT_ITERATION,\n metavar='NUM',\n help='set the number of iterations for each test (defualt:%d)' % \\\n DEFAULT_ITERATION)\n parser.add_option('-f', '--fstypes', default='ext2,ext3,ext4,btrfs,xfs',\n type='string', metavar='TYPES', help='set the file systems to test')\n parser.add_option('-n', '--num', default=10000, type=int, metavar='NUM',\n help='set the number of file created')\n parser.add_option('-N', '--numa', action='store_true', default=False,\n help='run NUMA test')\n parser.add_option('-S', '--scalability', action='store_true', default=False,\n help='run scalability test')\n global options\n options, args = parser.parse_args()\n\n benchutils.check_root_or_die()\n suffix = ''\n if options.numa:\n suffix = 'numa'\n else:\n suffix = 'scale'\n output_dir = benchutils.get_output_directory(suffix=suffix, timestamp=True)\n fstypes = options.fstypes.split(',')\n for fs in fstypes:\n if options.numa:\n run_tests(output_dir, fs)\n elif options.scalability:\n run_scalability_tests(output_dir, fs)", "def main(args):\n\n # Compose the model list\n modellist = []\n if args['model']:\n modellist.append(bmark.ModelInfo(args['model'], os.getcwd(), args['classname']))\n\n # Load the benchmark settings\n benchmark = None\n benchmark = bmark.load_benchmark(args['benchmark'])\n corresponding_data = False\n if 'corresponding_data' in benchmark:\n corresponding_data = benchmark['corresponding_data']\n\n # Only extend if not cached\n cache_df = None\n if not args['cache']:\n modellist.extend(benchmark['models'])\n else:\n cache_df = pd.read_csv(args['cache'])\n\n # Extract comparator settings from benchmark description\n eval_comparator = comparator.EqualityComparator()\n if 'comparator' in benchmark:\n if benchmark['comparator'] == 'nvc':\n eval_comparator = comparator.NVCComparator()\n\n # Run the model evaluation\n is_silent = (args['output'] in ['html', 'server'])\n eva = None\n if benchmark['type'] == 'adaption':\n eva = evaluator.AdaptionEvaluator(\n modellist,\n eval_comparator,\n benchmark['data.test'],\n train_datafile=benchmark['data.train'],\n train_data_person=benchmark['data.train_person'],\n silent=is_silent,\n 
corresponding_data=corresponding_data,\n domain_encoders=benchmark['domain_encoders'],\n cache_df=cache_df\n )\n elif benchmark['type'] == 'coverage':\n # Check for benchmark validity\n if benchmark['data.train'] or benchmark['data.train_person']:\n print('WARNING: Ignoring specified training and train_person data ' \\\n + 'for coverage evaluation...')\n\n eva = evaluator.CoverageEvaluator(\n modellist,\n eval_comparator,\n benchmark['data.test'],\n train_datafile=benchmark['data.train'],\n train_data_person=benchmark['data.train_person'],\n silent=is_silent,\n corresponding_data=corresponding_data,\n domain_encoders=benchmark['domain_encoders'],\n cache_df=cache_df\n )\n else:\n raise ValueError('Unknown benchmark type: {}'.format(benchmark['type']))\n\n with silence_stdout(is_silent):\n res_df = eva.evaluate()\n\n if 'save' in args:\n res_df.to_csv(args['save'], index=False)\n\n # Run the metric visualizer\n htmlcrtr = html_creator.HTMLCreator([\n viz_plot.AccuracyVisualizer(),\n viz_plot.BoxplotVisualizer(),\n viz_plot.TableVisualizer()\n ])\n\n # Prepare the benchmark output information and visualize the evaluation results\n benchmark_info = {\n 'name': os.path.basename(args['benchmark']),\n 'data.train': os.path.basename(\n benchmark['data.train']) if benchmark['data.train'] else '',\n 'data.train_person': os.path.basename(\n benchmark['data.train_person']) if benchmark['data.train_person'] else '',\n 'data.test': os.path.basename(benchmark['data.test']),\n 'type': benchmark['type'],\n 'corresponding_data': benchmark['corresponding_data'],\n 'domains': list(res_df['domain'].unique()),\n 'response_types': list(res_df['response_type'].unique()),\n }\n\n if args['output'] == 'browser':\n html = htmlcrtr.to_html(res_df, benchmark_info, embedded=False)\n server.load_in_default_browser(html.encode('utf8'))\n elif args['output'] == 'server':\n html = htmlcrtr.to_html(res_df, benchmark_info, embedded=True)\n sys.stdout.buffer.write(html.encode('utf-8'))\n elif args['output'] == 'html':\n html = htmlcrtr.to_html(res_df, benchmark_info, embedded=False)\n print(html)", "def benchmark():\n parser = argparse.ArgumentParser(\n \n description='pyrpipe diagnostic utility\\nGenerate benchmark report.',\n \n usage='''pyrpipe_diagnostic report [<args>] <logfile>\n \n ''') \n parser.add_argument('-o', help='out file \\ndefault: same as input logfile',action=\"store\")\n parser.add_argument('-e', help='report output type: [MD,PDF,HTML] \\ndefault: PDF',default='PDF',action=\"store\")\n parser.add_argument('-v',help='verbose',action=\"store_true\")\n parser.add_argument('-f',help='Filter by programs. Provide a comma-separated list e.g., prefetch,STAR,bowtie2 \\ndefault None')\n parser.add_argument('-t',help='Temporary directory. 
\\ndefault ./tmp',action=\"store\")\n parser.add_argument('logfile', help='The log file generated by pyrpipe',action=\"store\")\n args = parser.parse_args(sys.argv[2:])\n \n logFile=args.logfile\n envLog=reports.checkEnvLog(logFile) \n #parse args\n vFlag=args.v\n if vFlag:\n print(\"Generating benchmarks\")\n outFile=\"\"\n if args.o is None:\n outFile=pu.get_file_basename(args.logfile)\n else:\n outFile=args.o\n outFile+='.'+args.e\n \n filters=[]\n if args.f is not None:\n filters= args.f.split(',')\n #create temp dir\n tempDir=\"\"\n if args.t is not None:\n tempDir= args.t\n else:\n tempDir=os.path.join(os.getcwd(),\"tmp\")\n #create tmp dir\n if not pu.check_paths_exist(tempDir):\n pu.mkdir(tempDir)\n \n reports.generateBenchmarkReport(logFile,envLog,filters,tempDir,outFile=outFile,verbose=args.v)", "def main():\n configuration = {'resource-folder': 'resources',\n 'build-folder': 'build',\n 'log-folder': 'logfiles',\n 'use-preloaded': False,\n 'addi-metrics': 'addi-metrics.json',\n 'jenkins': {'dependency-filename': 'dependencies.txt',\n 'server': 'http://is.dbc.dk',\n 'repository-project': 'opensearch-3rd-party-dependencies'},\n 'log-zip-file':'logs.zip'}\n configuration.update(cli())\n setup_logger(configuration['verbose'])\n run_performance_test(configuration)", "def run_benchmark():\n import argparse\n parser = argparse.ArgumentParser(description='Benchmark alchemically modified system against unmodified system.')\n parser.add_argument('--platform', dest='platform_name', action='store', default=None, help='platform name to benchmark (default: None)')\n options = parser.parse_args()\n\n from sams.tests import testsystems\n for testsystem_name in ['AblImatinibExplicitAlchemical']:\n cls = getattr(testsystems, testsystem_name)\n testsystem = cls()\n factory_args = { 'ligand_atoms' : testsystem.alchemical_atoms, 'receptor_atoms' : range(0,4266) }\n benchmark(testsystem.system, testsystem.positions, platform_name=options.platform_name, nsteps=5000, timestep=1.0*unit.femtoseconds, factory_args=factory_args)", "def main():\n known_args, unknown_args = parse_known_args()\n if not unknown_args:\n # return an error message if no command is provided\n sys.exit(\"Please provide a command to benchmark: $ humann_benchmark COMMAND\")\n try:\n process = subprocess.Popen(\" \".join(unknown_args),shell=True)\n except (EnvironmentError, subprocess.CalledProcessError):\n sys.exit(\"Unable to execute command: \" + \" \".join(unknown_args))\n pid=str(process.pid)\n start=time.time()\n max_memory=0\n while process.poll() is None:\n time.sleep(1)\n # while the process is running check on the memory use\n # get the pids of the main process and all children (and their children)\n pids=get_pids(pid)\n stdout=subprocess.check_output([\"ps\",\"--pid\",\",\".join(pids),\"-o\",\"pid,rss,command\"]).decode(\"utf-8\")\n print(\"\\n\"+stdout+\"\\n\")\n # remove the header from the process output\n status=[i.split() for i in filter(lambda x: x, stdout.split(\"\\n\")[1:])]\n # memory is the sum of all rss\n memory=sum(int(i[1]) for i in status)\n if memory > max_memory:\n max_memory=memory\n \n end=time.time()\n print(\"Time: {:.0f} minutes\".format((end-start)/60))\n print(\"Max Memory (RSS): {:.1f} GB\".format(max_memory*1.0/1024**2))", "def main():\n test_runner = TestRunner(\n FLAGS.workspace, FLAGS.bench_home, imagenet_dir=FLAGS.train_data_dir)\n test_runner.run_tests(FLAGS.test_list.split(','))", "def benchmark(self):\n logger.info(self.benchmark.__doc__)\n return self.run(self.benchmark_profile())", "def 
run_benchmark(env: Env, in_file):\n\n print('Running benchmarks in', in_file.name)\n # Run file_path through mlir_to_bef and bef_executor and extract the\n # benchmark result.\n return env.run_mlir(in_file.read())", "def _run():\n subprocess.check_call(\n [\n \"tools/bazel\",\n \"build\",\n \"-c\",\n \"opt\",\n \"test/core/memory_usage/memory_usage_test\",\n ]\n )\n ret = {}\n for name, benchmark_args in _BENCHMARKS.items():\n for scenario, extra_args in _SCENARIOS.items():\n # TODO(chenancy) Remove when minstack is implemented for channel\n if name == \"channel\" and scenario == \"minstack\":\n continue\n try:\n output = subprocess.check_output(\n [\n \"bazel-bin/test/core/memory_usage/memory_usage_test\",\n ]\n + benchmark_args\n + extra_args\n )\n except subprocess.CalledProcessError as e:\n print(\"Error running benchmark:\", e)\n continue\n for line in output.splitlines():\n for key, (pattern, conversion) in _INTERESTING.items():\n m = re.match(pattern, line)\n if m:\n ret[scenario + \": \" + key] = conversion(m.group(1))\n return ret", "def main(benchmark, size=None, backend=None, repetitions=None, burnin=1, device=\"cpu\"):\n try:\n bm_module, bm_identifier = get_benchmark_module(benchmark)\n except ImportError as e:\n click.echo(f\"Error while loading benchmark {benchmark}: {e!s}\", err=True)\n raise click.Abort()\n\n available_backends = set(bm_module.__implementations__)\n\n if len(backend) == 0:\n backend = available_backends.copy()\n else:\n backend = set(backend)\n\n unsupported_backends = [b for b in backend if b not in available_backends]\n\n for b in unsupported_backends:\n click.echo(\n f'Backend \"{b}\" is not supported by chosen benchmark (skipping)', err=True\n )\n backend.remove(b)\n\n for b in backend.copy():\n try:\n with setup_functions[b](device=device) as bmod:\n click.echo(f\"Using {b} version {bmod.__version__}\")\n except BackendNotSupported as e:\n click.echo(\n f'Setup for backend \"{b}\" failed (skipping), reason: {e!s}', err=True\n )\n backend.remove(b)\n\n try:\n check_backend_conflicts(backend, device)\n except BackendConflict as exc:\n click.echo(f\"Backend conflict: {exc!s}\", err=True)\n raise click.Abort()\n\n runs = sorted(itertools.product(backend, size))\n\n if len(runs) == 0:\n click.echo(\"Nothing to do\")\n return\n\n timings = {run: [] for run in runs}\n\n if repetitions is None:\n click.echo(\"Estimating repetitions...\")\n repetitions = {}\n\n for b, s in runs:\n # use end-to-end runtime for repetition estimation\n def run_func():\n run = bm_module.get_callable(b, s, device=device)\n with setup_functions[b](device=device):\n run()\n\n repetitions[(b, s)] = estimate_repetitions(run_func)\n else:\n repetitions = {(b, s): repetitions for b, s in runs}\n\n all_runs = list(\n itertools.chain.from_iterable(\n [run] * (repetitions[run] + burnin) for run in runs\n )\n )\n random.shuffle(all_runs)\n\n results = {}\n checked = {r: False for r in runs}\n\n pbar = click.progressbar(\n label=f\"Running {len(all_runs)} benchmarks...\", length=len(runs)\n )\n\n try:\n with pbar:\n for (b, size) in all_runs:\n with setup_functions[b](device=device):\n run = bm_module.get_callable(b, size, device=device)\n with Timer() as t:\n res = run()\n\n # YOWO (you only warn once)\n if not checked[(b, size)]:\n if size in results:\n is_consistent = check_consistency(\n results[size], convert_to_numpy(res, b, device)\n )\n if not is_consistent:\n click.echo(\n f\"\\nWarning: inconsistent results for size {size}\",\n err=True,\n )\n else:\n results[size] = 
convert_to_numpy(res, b, device)\n checked[(b, size)] = True\n\n timings[(b, size)].append(t.elapsed)\n pbar.update(1.0 / (repetitions[(b, size)] + burnin))\n\n # push pbar to 100%\n pbar.update(1.0)\n\n for run in runs:\n assert len(timings[run]) == repetitions[run] + burnin\n\n finally:\n stats = compute_statistics(timings)\n click.echo(format_output(stats, bm_identifier, device=device))", "def main( argv = None ):\n\n if argv == None: argv = sys.argv\n\n # setup command line parser\n parser = E.OptionParser( version = \"%prog version: $Id$\", \n usage = globals()[\"__doc__\"] )\n\n parser.add_option(\"--category\", dest=\"category\", type=\"choice\",\n choices = (\"B\", \"C\"), help=\"supply help\" )\n\n ## add common options (-h/--help, ...) and parse command line \n (options, args) = E.Start( parser, argv = argv )\n\n data = getData(options.stdin)\n if options.category == \"B\":\n options.stdout.write(\"Category B pathway\\tKO\\tGenes\\tDescriptions\\n\")\n for pathway, descriptions in b2ko(data).iteritems():\n options.stdout.write(\"\\t\".join([pathway, \"; \".join(descriptions[0]), \"; \".join(descriptions[1]), \"; \".join(descriptions[2])]) + \"\\n\")\n\n elif options.category == \"C\":\n options.stdout.write(\"Category C pathway\\tKO\\tGenes\\tDescriptions\\n\")\n for pathway, descriptions in c2ko(data).iteritems():\n options.stdout.write(\"\\t\".join([pathway, \"; \".join(descriptions[0]), \"; \".join(descriptions[1]), \"; \".join(descriptions[2])]) + \"\\n\")\n else:\n raise ValueError(\"must specify the category of pathway\")\n\n\n ## write footer and output benchmark information.\n E.Stop()", "def main(argv):\n\n print('Preparing for balanced downsampler indexer by factor')\n gdal.UseExceptions()\n dataset_folder = None\n storage_folder = None\n tactic = TACTIC_DOWNSAMPLE\n operation = OPERATION_MIX\n beginning = 5\n ending = 100\n jump = 5\n iterations = 10\n\n try:\n opts, args = getopt.getopt(argv, \"hs:d:t:cafmob:e:j:i:\",\n [\"dataset_folder=\", \"storage_folder=\", \"tactic=\", \"create\", \"analyze\",\n \"full_analyze\", \"mix\", \"out\", \"begin=\", \"end=\", \"jump=\", \"iterations=\"])\n except getopt.GetoptError:\n print(\n 'balanced_factor_indexer.py -s <dataset_folder> -d <storage_folder> -t {upsample/downsample} -m -b <beginning_percentage> -e <ending_percentage -j <jump_between_samples> -i <number_of_iterations>')\n sys.exit(2)\n for opt, arg in opts:\n if opt == \"-h\":\n print(\n 'balanced_factor_indexer.py -s <dataset_folder> -d <storage_folder> -t {upsample/downsample} -m -b <beginning_percentage> -e <ending_percentage -j <jump_between_samples> -i <number_of_iterations>')\n sys.exit()\n elif opt in [\"-s\", \"--dataset_folder\"]:\n dataset_folder = arg\n elif opt in [\"-d\", \"--storage_folder\"]:\n storage_folder = arg\n elif opt in [\"-t\", \"--tactic\"]:\n if arg == 'upsample':\n tactic = TACTIC_UPSAMPLE\n elif arg == 'downsample':\n tactic = TACTIC_DOWNSAMPLE\n else:\n tactic = TACTIC_NONE\n elif opt in [\"-c\", \"--create\"]:\n operation = OPERATION_CREATE\n elif opt in [\"-a\", \"--analyze\"]:\n operation = OPERATION_ANALYZE\n elif opt in [\"-f\", \"--full_analyze\"]:\n operation = OPERATION_FULLANALYZE\n elif opt in [\"-m\", \"--mix\"]:\n operation = OPERATION_MIX\n elif opt in [\"-o\", \"--summarize\"]:\n operation = OPERATION_SUMMARIZE\n elif opt in [\"-b\", \"--beginning\"]:\n beginning = int(arg)\n elif opt in [\"-e\", \"--ending\"]:\n ending = int(arg)\n elif opt in [\"-j\", \"--jump\"]:\n jump = int(arg)\n elif opt in [\"-i\", 
\"--iterations\"]:\n iterations = int(arg)\n\n print('Working with dataset folder %s' % dataset_folder)\n\n if operation == OPERATION_CREATE or operation == OPERATION_MIX:\n indexes_creator(dataset_folder, tactic, storage_folder, beginning, ending, jump, iterations)\n if operation == OPERATION_FULLANALYZE or operation == OPERATION_MIX:\n full_dataset_analyzer(dataset_folder, storage_folder, tactic)\n if operation == OPERATION_ANALYZE or operation == OPERATION_MIX:\n dataset_analyzer(dataset_folder, storage_folder, beginning, ending, jump)\n if operation == OPERATION_SUMMARIZE or operation == OPERATION_MIX:\n analysis_summarizer(storage_folder, beginning, ending, jump)\n\n sys.exit()", "def main():\n opt = parse_opts()\n run(opt)", "def main():\n opt = parse_opts()\n run(opt)", "def main():\n parser = argparse.ArgumentParser(\n description='Runs test for C++ implementation of M*')\n parser.add_argument('test_file', help='File describing test cases')\n parser.add_argument('output_file', help='Name of output file')\n parser.add_argument('num_processors', type=int, action='store',\n help='Number of processes to run on each node. ' +\n 'The local host running the primary server will ' +\n 'run one fewer worker processes')\n parser.add_argument('-i', action='store', type=float, default=1.0,\n help='Set inflation factor for the heuristic, ' +\n 'defaults to 1', metavar='INF', dest='inflation')\n parser.add_argument('-t', action='store', type=int, default=120,\n help='Set time limit for planning. Defaults to 2 ' +\n 'minutes', dest='time_limit')\n parser.add_argument('--hosts', action='store',\n default=('python', 'cobra', 'viper', 'anaconda'),\n help='Hostnames/IPs to use as processing nodes.',\n nargs='*', metavar='HOSTNAME')\n\n args = parser.parse_args()\n\n run_cpp_mstar_trial(args.test_file, args.output_file,\n inflation=args.inflation, time_limit=args.time_limit,\n hosts=args.hosts, num_processors=args.num_processors)", "def _initialise_testbench(argv_):\n _rlock.acquire()\n\n if \"COCOTB_LIBRARY_COVERAGE\" in os.environ:\n import coverage\n\n global _library_coverage\n _library_coverage = coverage.coverage(\n data_file=\".coverage.cocotb\",\n branch=True,\n include=[\"{}/*\".format(os.path.dirname(__file__))])\n _library_coverage.start()\n\n global argc, argv\n argv = argv_\n argc = len(argv)\n\n root_name = os.getenv(\"TOPLEVEL\")\n if root_name is not None:\n if root_name == \"\":\n root_name = None\n elif '.' 
in root_name:\n # Skip any library component of the toplevel\n root_name = root_name.split(\".\", 1)[1]\n\n # sys.path normally includes \"\" (the current directory), but does not appear to when python is embedded.\n # Add it back because users expect to be able to import files in their test directory.\n # TODO: move this to gpi_embed.cpp\n sys.path.insert(0, \"\")\n\n _setup_logging()\n\n # From https://www.python.org/dev/peps/pep-0565/#recommended-filter-settings-for-test-runners\n # If the user doesn't want to see these, they can always change the global\n # warning settings in their test module.\n if not sys.warnoptions:\n warnings.simplefilter(\"default\")\n\n from cocotb import simulator\n\n global SIM_NAME, SIM_VERSION\n SIM_NAME = simulator.get_simulator_product().strip()\n SIM_VERSION = simulator.get_simulator_version().strip()\n\n cocotb.log.info(\"Running on {} version {}\".format(SIM_NAME, SIM_VERSION))\n\n memcheck_port = os.getenv('MEMCHECK')\n if memcheck_port is not None:\n mem_debug(int(memcheck_port))\n\n log.info(\"Running tests with cocotb v%s from %s\" %\n (__version__, os.path.dirname(__file__)))\n\n # Create the base handle type\n\n process_plusargs()\n\n global scheduler\n scheduler = Scheduler()\n\n # Seed the Python random number generator to make this repeatable\n global RANDOM_SEED\n RANDOM_SEED = os.getenv('RANDOM_SEED')\n\n if RANDOM_SEED is None:\n if 'ntb_random_seed' in plusargs:\n RANDOM_SEED = eval(plusargs['ntb_random_seed'])\n elif 'seed' in plusargs:\n RANDOM_SEED = eval(plusargs['seed'])\n else:\n RANDOM_SEED = int(time.time())\n log.info(\"Seeding Python random module with %d\" % (RANDOM_SEED))\n else:\n RANDOM_SEED = int(RANDOM_SEED)\n log.info(\"Seeding Python random module with supplied seed %d\" % (RANDOM_SEED))\n random.seed(RANDOM_SEED)\n\n # Setup DUT object\n from cocotb import simulator\n\n handle = simulator.get_root_handle(root_name)\n if not handle:\n raise RuntimeError(\"Can not find root handle ({})\".format(root_name))\n\n global top\n top = cocotb.handle.SimHandle(handle)\n\n try:\n import pytest\n except ImportError:\n log.warning(\"Pytest not found, assertion rewriting will not occur\")\n else:\n try:\n # Install the assertion rewriting hook, which must be done before we\n # import the test modules.\n from _pytest.config import Config\n from _pytest.assertion import install_importhook\n pytest_conf = Config.fromdictargs([], {})\n install_importhook(pytest_conf)\n except Exception:\n log.exception(\n \"Configuring the assertion rewrite hook using pytest {} failed. 
\"\n \"Please file a bug report!\".format(pytest.__version__))\n\n # start Regression Manager\n global regression_manager\n regression_manager = RegressionManager.from_discovery(top)\n regression_manager.execute()\n\n _rlock.release()\n return True", "def main():\n DataClasses = [FamilyStats, SeqHdrStats, UniProtStats]\n CmdLineOps, Args = parse_command_line_options()\n ThreadManager(DataClasses)\n print_results(DataClasses, CmdLineOps.output_file)\n return", "def main():\n logging.info(\"Testing iOS application performance metrics: application size, launch duration and RAM memory usage!\")\n\n try:\n args = parse_args()\n\n TEST_RESULTS = run_tests(args)\n test_summary = create_test_summary(args, TEST_RESULTS)\n write_results_to_file(TEST_RESULTS, RESULTS_FILE, test_summary, SUMMARY_FILE)\n report_tests(args, test_summary)\n\n except Exception as e:\n logging.error(\"Testing performance of application failed with error '{ERROR}'\".format(ERROR=e))", "def main(args=None):", "def main(args=None):", "def main(ctx: click.Context):\n click.secho(\"MySQL Benchmark\", bold=True)\n results = []\n with click.progressbar(range(ctx.obj[\"count\"])) as bar:\n for number in bar:\n response = requests.get(url=f'{ctx.obj[\"hostname\"]}/api/mysql.php')\n if response.status_code != 200:\n raise click.ClickException(\n f'{ctx.obj[\"hostname\"]}/api/mysql.php Not Found!'\n )\n\n response = requests.get(url=f'{ctx.obj[\"hostname\"]}/api/mysql.php')\n response.raise_for_status()\n results.append(\n BenchmarkResult(\n timestamp=time.time(), number=number, data=response.json()\n )\n )\n time.sleep(ctx.obj[\"sleep\"])\n\n insert_timings = get_timings(results, \"insert\")\n insert_single_transaction_timings = get_timings(\n results, \"insertSingleTransaction\"\n )\n result = {\n \"results\": results,\n \"timings\": {\n \"insert\": calculate_timing_stats(insert_timings),\n \"insert_single_transaction\": calculate_timing_stats(\n insert_single_transaction_timings\n ),\n },\n }\n table = render_table(result)\n click.echo(table)", "def Run(benchmark_spec):\n _UpdateBenchmarkSpecWithFlags(benchmark_spec)\n vm = benchmark_spec.vms[0]\n\n if benchmark_spec.tpus:\n mnist_benchmark_script = 'mnist_tpu.py'\n mnist_benchmark_cmd = ('cd tpu/models && '\n 'export PYTHONPATH=$(pwd) && '\n 'cd official/mnist && '\n 'python {script} '\n '--data_dir={data_dir} '\n '--iterations={iterations} '\n '--model_dir={model_dir} '\n '--batch_size={batch_size}'.format(\n script=mnist_benchmark_script,\n data_dir=benchmark_spec.data_dir,\n iterations=benchmark_spec.iterations,\n model_dir=benchmark_spec.model_dir,\n batch_size=benchmark_spec.batch_size))\n else:\n mnist_benchmark_script = 'mnist.py'\n mnist_benchmark_cmd = ('cd models && '\n 'export PYTHONPATH=$(pwd) && '\n 'cd official/mnist && '\n 'python {script} '\n '--data_dir={data_dir} '\n '--model_dir={model_dir} '\n '--batch_size={batch_size} '.format(\n script=mnist_benchmark_script,\n data_dir=benchmark_spec.data_dir,\n model_dir=benchmark_spec.model_dir,\n batch_size=benchmark_spec.batch_size))\n\n if nvidia_driver.CheckNvidiaGpuExists(vm):\n mnist_benchmark_cmd = '{env} {cmd}'.format(\n env=tensorflow.GetEnvironmentVars(vm), cmd=mnist_benchmark_cmd)\n samples = []\n metadata = CreateMetadataDict(benchmark_spec)\n\n if benchmark_spec.train_steps > 0:\n if benchmark_spec.tpus:\n tpu = benchmark_spec.tpu_groups['train'].GetName()\n num_shards = '--num_shards={}'.format(\n benchmark_spec.tpu_groups['train'].GetNumShards())\n else:\n tpu = num_shards = ''\n\n if 
benchmark_spec.tpus:\n mnist_benchmark_train_cmd = (\n '{cmd} --tpu={tpu} --use_tpu={use_tpu} --train_steps={train_steps} '\n '{num_shards} --noenable_predict'.format(\n cmd=mnist_benchmark_cmd,\n tpu=tpu,\n use_tpu=bool(benchmark_spec.tpus),\n train_steps=benchmark_spec.train_steps,\n num_shards=num_shards))\n else:\n mnist_benchmark_train_cmd = (\n '{cmd} --train_epochs={train_epochs} '.format(\n cmd=mnist_benchmark_cmd,\n train_epochs=benchmark_spec.train_epochs))\n\n start = time.time()\n stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_train_cmd)\n elapsed_seconds = (time.time() - start)\n samples.extend(MakeSamplesFromTrainOutput(\n metadata, stdout + stderr, elapsed_seconds, benchmark_spec.train_steps))\n\n if benchmark_spec.eval_steps > 0:\n if benchmark_spec.tpus:\n mnist_benchmark_eval_cmd = (\n '{cmd} --tpu={tpu} --use_tpu={use_tpu} --eval_steps={eval_steps}'\n .format(\n cmd=mnist_benchmark_cmd,\n use_tpu=bool(benchmark_spec.tpus),\n tpu=benchmark_spec.tpu_groups['eval'].GetName(),\n eval_steps=benchmark_spec.eval_steps))\n else:\n mnist_benchmark_eval_cmd = ('{cmd} --eval_steps={eval_steps}'.format(\n cmd=mnist_benchmark_cmd, eval_steps=benchmark_spec.eval_steps))\n\n stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_eval_cmd)\n samples.extend(MakeSamplesFromEvalOutput(metadata, stdout + stderr,\n elapsed_seconds))\n return samples", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def main(args=None):\n pass", "def main():\n parser = argparse.ArgumentParser(description=\"\"\"Tester for YT Data API and different inputs\"\"\")\n parser.add_argument('-a', '--analytics', help='Performs a basic analytics lookup for the user\\'s channel entered')\n parser.add_argument('-c', '--comments', help='Performs a lookup of comments for the video id entered')\n args = parser.parse_args()\n\n if args.analytics:\n analytics = args.analytics\n analyt(analytics)\n\n if args.comments:\n comments = args.comments\n get_comments(comments)", "def benchmark(options):\n # Prepare experiments\n with open(options['<benchmark>']) as f:\n benchmark_config = json.loads(f.read())\n generate_agent_configs(benchmark_config)\n experiments = product(benchmark_config['environments'], benchmark_config['agents'], [options])\n\n # Run evaluations\n with Pool(processes=int(options['--processes'])) as pool:\n results = pool.starmap(evaluate, experiments)\n\n # Clean temporary config files\n generate_agent_configs(benchmark_config, clean=True)\n\n # Write evaluations summary\n benchmark_filename = os.path.join(Evaluation.OUTPUT_FOLDER, '{}_{}.{}.json'.format(\n BENCHMARK_FILE, datetime.datetime.now().strftime('%Y%m%d-%H%M%S'), os.getpid()))\n with open(benchmark_filename, 'w') as f:\n json.dump(results, f, sort_keys=True, indent=4)\n gym.logger.info('Benchmark done. 
Summary written in: {}'.format(benchmark_filename))", "def main(args):", "def main(args):", "def test_make_benchmark_from_command_line_multiple_input_sources(\n env: LlvmEnv, retcode: int\n):\n with temporary_working_directory() as cwd:\n with open(\"a.c\", \"w\") as f:\n f.write(\"int main() { return B(); }\")\n\n with open(\"b.c\", \"w\") as f:\n f.write(f\"int B() {{ return {retcode}; }}\")\n\n bm = env.make_benchmark_from_command_line([\"gcc\", \"a.c\", \"b.c\", \"-o\", \"foo\"])\n assert not (cwd / \"foo\").is_file()\n\n env.reset(benchmark=bm)\n assert \"main()\" in env.ir\n\n bm.compile(env)\n assert (cwd / \"foo\").is_file()\n\n p = subprocess.Popen([\"./foo\"])\n p.communicate(timeout=60)\n assert p.returncode == retcode", "def main(args):\n\n print(now(), \"benchmark_test.py running.\")\n out_dir = args.output\n if not args.from_scratch:\n print(\n f\"Testing poisons from {args.poisons_path}, in the transfer learning setting...\\n\".format()\n )\n\n ####################################################\n # Frozen Feature Extractor (ffe)\n print(\"Frozen Feature Extractor test:\")\n args.num_poisons = 25\n args.trainset_size = 2500\n args.val_period = 20\n args.optimizer = \"SGD\"\n args.lr = 0.01\n args.lr_schedule = [30]\n args.epochs = 40\n\n args.end2end = False\n\n # white-box attack\n args.output = os.path.join(out_dir, \"ffe-wb\")\n args.model = \"resnet18\"\n args.model_path = whitebox_modelpath\n poison_test.main(args)\n\n # grey box attack\n args.model = \"resnet18\"\n args.model_path = greybox_modelpath\n args.output = os.path.join(out_dir, \"ffe-gb\")\n poison_test.main(args)\n\n # black box attacks\n args.output = os.path.join(out_dir, \"ffe-bb\")\n\n args.model = \"MobileNetV2\"\n args.model_path = blackbox_modelpath[0]\n poison_test.main(args)\n\n args.model_path = blackbox_modelpath[1]\n args.model = \"VGG11\"\n poison_test.main(args)\n ####################################################\n\n ####################################################\n # End-To-End Fine Tuning (e2e)\n print(\"End-To-End Fine Tuning test:\")\n args.num_poisons = 25\n args.trainset_size = 2500\n args.val_period = 20\n args.optimizer = \"SGD\"\n args.lr = 0.01\n args.lr_schedule = [30]\n args.epochs = 40\n\n args.end2end = True\n\n # white-box attack\n args.output = os.path.join(out_dir, \"e2e-wb\")\n args.model = \"resnet18\"\n args.model_path = whitebox_modelpath\n poison_test.main(args)\n\n # grey box attack\n args.model = \"resnet18\"\n args.model_path = greybox_modelpath\n args.output = os.path.join(out_dir, \"e2e-gb\")\n poison_test.main(args)\n\n # black box attacks\n args.output = os.path.join(out_dir, \"e2e-bb\")\n\n args.model = \"MobileNetV2\"\n args.model_path = blackbox_modelpath[0]\n poison_test.main(args)\n\n args.model = \"VGG11\"\n args.model_path = blackbox_modelpath[1]\n poison_test.main(args)\n ####################################################\n\n else:\n print(\n f\"Testing poisons from {args.poisons_path}, in the from scratch training setting...\\n\".format()\n )\n\n ####################################################\n # From Scratch Training (fst)\n args.num_poisons = 500\n args.trainset_size = 50000\n args.val_period = 20\n args.optimizer = \"SGD\"\n args.lr = 0.1\n args.lr_schedule = [100, 150]\n args.epochs = 200\n args.model_path = \"\"\n args.output = os.path.join(out_dir, \"fst\")\n\n args.model = \"resnet18\"\n poison_test.main(args)\n\n args.model = \"MobileNetV2\"\n poison_test.main(args)\n\n args.model = \"VGG11\"\n poison_test.main(args)\n 
####################################################", "def Run(benchmark_spec):\n _UpdateBenchmarkSpecWithFlags(benchmark_spec)\n vm = benchmark_spec.vms[0]\n if benchmark_spec.tpus:\n # For MLPerf 1.0, the benchmake code of different hardware are different.\n if (benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-32' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-128' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-256' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-512' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-1024' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-2048'):\n run_path = (\n '$HOME/training_results_{version}/Google/benchmarks/{model}/tpu-{tpus}'\n .format(\n version=VERSION.value,\n model=benchmark_spec.benchmark,\n tpus=benchmark_spec.tpu_groups['train'].GetAcceleratorType()))\n code_path = (\n '$HOME/training_results_{version}/Google/benchmarks/{model}/implementations/tpu-{tpus}-{model}'\n .format(\n version=VERSION.value,\n model=benchmark_spec.benchmark,\n tpus=benchmark_spec.tpu_groups['train'].GetAcceleratorType()))\n\n if MASK in benchmark_spec.benchmark:\n model = 'mask_rcnn'\n elif GNMT in benchmark_spec.benchmark:\n model = 'nmt'\n else:\n model = benchmark_spec.benchmark\n\n mlperf_benchmark_cmd = (\n 'cd {code_path} && '\n 'export PYTHONPATH=$(pwd):$(pwd)/{model} && '\n 'cd {model} && '\n '{run_path}/run_and_time.sh'.format(\n code_path=code_path,\n model=model,\n run_path=run_path))\n\n if SSD in benchmark_spec.benchmark:\n mlperf_benchmark_cmd = (\n 'export '\n 'MLP_GCS_RESNET_CHECKPOINT={checkpoint}'\n ' && {cmd}'.format(\n checkpoint=FLAGS.mlperf_gcs_resnet_checkpoint,\n cmd=mlperf_benchmark_cmd))\n else:\n raise ValueError(\n 'MLPerf configurations do not support the hardware in PKB. 
PKB may '\n 'need to be updated if this is a new TPU type.')\n\n else:\n run_sub_paths = {RESNET: 'resnet/implementations/mxnet',\n TRANSFORMER: 'transformer/implementations/pytorch',\n MINIGO: 'minigo/implementations/tensorflow',\n MASK: 'maskrcnn/implementations/pytorch',\n GNMT: 'gnmt/implementations/pytorch',\n SSD: 'ssd/implementations/pytorch',\n BERT: 'bert/implementations/pytorch',}\n benchmark_path = f'$HOME/training_results_{VERSION.value}/NVIDIA/benchmarks'\n run_path = posixpath.join(benchmark_path,\n run_sub_paths[benchmark_spec.benchmark])\n env = {\n 'DGXSYSTEM': DGXSYSTEM,\n 'NEXP': 1,\n 'PULL': 0,\n 'LOGDIR': f'/tmp/{benchmark_spec.benchmark}',\n }\n envs = {\n RESNET: {},\n TRANSFORMER: {'DATADIR': '/data/wmt/utf8'},\n MINIGO: {'CONT': 'mlperf-nvidia:minigo'},\n MASK: {},\n GNMT: {'DATADIR': '/data/gnmt'},\n SSD: {'DATADIR': '/data'},\n BERT: {}\n }\n env.update(envs[benchmark_spec.benchmark])\n\n run_script = posixpath.join(run_path, 'run_with_docker.sh')\n vm_util.ReplaceText(vm, 'SYSLOGGING=1', 'SYSLOGGING=0', run_script)\n vm_util.ReplaceText(vm, 'docker exec -it', 'docker exec -t', run_script)\n if benchmark_spec.benchmark == RESNET:\n vm_util.ReplaceText(vm, r'mpirun.*run_and_time\\.sh',\n r'.\\/run_and_time.sh', run_script)\n\n env = ' '.join(f'{key}={value}' for key, value in env.items())\n if nvidia_driver.CheckNvidiaGpuExists(vm):\n env = f'{tensorflow.GetEnvironmentVars(vm)} {env}'\n\n mlperf_benchmark_cmd = (\n f'chmod 755 {run_script} && '\n f'cd {run_path} && '\n f'{env} {run_script}')\n\n samples = []\n metadata = _CreateMetadataDict(benchmark_spec)\n stdout, _ = vm.RobustRemoteCommand(mlperf_benchmark_cmd)\n if NONE in FLAGS.mlperf_profiler:\n samples.extend(\n MakeSamplesFromOutput(\n metadata,\n stdout,\n use_tpu=bool(benchmark_spec.tpus),\n model=benchmark_spec.benchmark))\n return samples", "def usage():\n \n print '-b <bench> the bench to show.'\n print '-c <config> the config to show (GPU, 8888, 565, etc).'\n print '-d <dir> a directory containing bench_r<revision>_<scalar> files.'\n print '-e <file> file containing expected bench values/ranges.'\n print ' Will raise exception if actual bench values are out of range.'\n print ' See bench_expectations.txt for data format and examples.'\n print '-f <revision>[:<revision>] the revisions to use for fitting.'\n print ' Negative <revision> is taken as offset from most recent revision.'\n print '-i <time> the time to ignore (w, c, g, etc).'\n print ' The flag is ignored when -t is set; otherwise we plot all the'\n print ' times except the one specified here.'\n print '-l <title> title to use for the output graph'\n print '-m <representation> representation of bench value.'\n print ' See _ListAlgorithm class in bench_util.py.'\n print '-o <path> path to which to write output; writes to stdout if not specified'\n print '-r <revision>[:<revision>] the revisions to show.'\n print ' Negative <revision> is taken as offset from most recent revision.'\n print '-s <setting>[=<value>] a setting to show (alpha, scalar, etc).'\n print '-t <time> the time to show (w, c, g, etc).'\n print '-x <int> the desired width of the svg.'\n print '-y <int> the desired height of the svg.'\n print '--default-setting <setting>[=<value>] setting for those without.'", "def run_benchmark_job():\n args = parse_args()\n app_dir = os.path.join(str(os.environ['BENCHMARK_DIR']), \"ks-app\")\n\n kubeconfig_path = str(os.environ['KUBECONFIG'])\n api_client = deploy_utils.create_k8s_client(kubeconfig_path)\n\n namespace = args.namespace\n 
job_name = args.experiment_name\n\n # Set the namespace of kb job to default\n namespace = \"default\"\n # Deploy Kubebench\n util.run([\"ks\", \"generate\", \"kubebench-job\", job_name, \"--name=\" + job_name], cwd=app_dir)\n job_config_prefix = \"ks param set \" + job_name + \" \"\n\n cmd = job_config_prefix + \"mainJobKsRegistry \" + args.training_job_registry\n util.run(cmd.split(), cwd=app_dir)\n cmd = job_config_prefix + \"mainJobKsPackage \" + args.training_job_pkg\n util.run(cmd.split(), cwd=app_dir)\n cmd = job_config_prefix + \"mainJobKsPrototype \" + args.training_job_prototype\n util.run(cmd.split(), cwd=app_dir)\n cmd = job_config_prefix + \"mainJobConfig \" + args.training_job_config\n util.run(cmd.split(), cwd=app_dir)\n\n cmd = job_config_prefix + \"awsCredentialsSecret \" + args.aws_secret\n util.run(cmd.split(), cwd=app_dir)\n cmd = job_config_prefix + \"awsCredentialsSecretAccessKeyId \" + args.aws_access_key_id\n util.run(cmd.split(), cwd=app_dir)\n cmd = job_config_prefix + \"awsCredentialsSecretAccessKey \" + args.aws_secret_access_key\n util.run(cmd.split(), cwd=app_dir)\n cmd = job_config_prefix + \"awsRegion \" + args.aws_region\n util.run(cmd.split(), cwd=app_dir)\n\n cmd = job_config_prefix + \"githubTokenSecret \" + args.github_secret_name\n util.run(cmd.split(), cwd=app_dir)\n cmd = job_config_prefix + \"githubTokenSecretKey GITHUB_TOKEN\"\n util.run(cmd.split(), cwd=app_dir)\n cmd = job_config_prefix + \"controllerImage seedjeffwan/configurator:20190415\"\n util.run(cmd.split(), cwd=app_dir)\n cmd = job_config_prefix + \"postJobImage seedjeffwan/mpi-post-processor:logs\"\n util.run(cmd.split(), cwd=app_dir)\n cmd = job_config_prefix + \"postJobArgs null\"\n util.run(cmd.split(), cwd=app_dir)\n cmd = job_config_prefix + \"reporterType null\"\n util.run(cmd.split(), cwd=app_dir)\n\n cmd = job_config_prefix + \"experimentDataPvc \" + args.data_pvc\n util.run(cmd.split(), cwd=app_dir)\n\n # cmd = \"ks param set \" + job_name + \" config_args -- --config-file=\" + pvc_mount + \\\n # \"/config/\" + config_name + \".yaml\"\n # util.run(cmd.split(), cwd=app_dir)\n # cmd = \"ks param set \" + job_name + \" report_args -- --output-file=\" + pvc_mount + \\\n # \"/output/results.csv\"\n # util.run(cmd.split(), cwd=app_dir)\n\n apply_command = [\"ks\", \"apply\", \"default\", \"-c\", job_name]\n util.run(apply_command, cwd=app_dir)\n\n # TODO: expose timeout setting here.\n deploy_utils.wait_for_benchmark_job(job_name, namespace)\n deploy_utils.cleanup_benchmark_job(app_dir, job_name)", "def main(self):\n try:\n self.parse_args()\n self.run()\n return 0\n except AnalysisBackendError as e:\n L.error(e)\n return 1", "def sciml_bench_run(smlb_in: RuntimeIn, smlb_out: RuntimeOut):\n # activate monitor\n # Note: To use smlb_out, you must activate it, passing the rank\n # information initialized by your distributed learning environment;\n # for a non-distributed benchmark, simply pass rank=0, local_rank=0\n # and activate_log_on_host(_device)=False; here we use True for\n # demonstration -- the log on host0 and device0 will be the same as\n # that on console except for some small differences in time\n # measurements.\n smlb_out.activate(rank=0, local_rank=0, activate_log_on_host=True,\n activate_log_on_device=True, console_on_screen=True)\n\n # log top level process\n # Note: Calling begin(), ended() and message() on smlb_out.log means\n # calling these functions on console, host and device; nothing\n # happens when calling these functions on an unactivated logger.\n log = 
smlb_out.log\n log.begin('Running benchmark MNIST_tf_keras')\n\n # parse input arguments (only batch_size and epochs)\n # Note: Use try_get() to get a benchmark-specific argument safely from\n # smlb_in.bench_args (passed by users via -b).\n with log.subproc('Parsing input arguments'):\n # hyperparameters\n batch_size = smlb_in.bench_args.try_get('batch_size', default=64)\n epochs = smlb_in.bench_args.try_get('epochs', default=2)\n log.message(f'batch_size = {batch_size}')\n log.message(f'epochs = {epochs}')\n\n # create datasets\n with log.subproc('Creating datasets'):\n dataset_dir = smlb_in.dataset_dir\n train_set = create_dataset_mnist(dataset_dir / 'train.hdf5', batch_size)\n test_set = create_dataset_mnist(dataset_dir / 'test.hdf5', batch_size)\n log.message(f'Dataset directory: {dataset_dir}')\n\n # create model\n with log.subproc('Creating CNN model'):\n model = create_model_mnist()\n\n # train model\n log.begin('Training CNN model')\n # fit()\n with log.subproc('Running model.fit()'):\n # stamp model.fit in system monitor\n # Note: smlb_out.system will monitor system usage regularly; use\n # smlb_out.system.stamp_event() to stamp an event in the report\n smlb_out.system.stamp_event('model.fit')\n history = model.fit(train_set, epochs=epochs, batch_size=batch_size,\n validation_data=test_set, verbose=0,\n callbacks=[LogEpochCallback(smlb_out)])\n # save model\n with log.subproc('Saving model weights'):\n weights_file = smlb_in.output_dir / 'model_weights.h5'\n model.save(weights_file)\n log.message(f'Saved to: {weights_file}')\n # save history\n with log.subproc('Saving training history'):\n history_file = smlb_in.output_dir / 'training_history.yml'\n with open(history_file, 'w') as handle:\n yaml.dump(history.history, handle)\n log.message(f'Saved to: {history_file}')\n log.ended('Training CNN model')\n\n # predict\n with log.subproc('Making predictions on test set'):\n with h5py.File(dataset_dir / 'test.hdf5', 'r') as h5_file:\n # stamp model.predict in system monitor\n smlb_out.system.stamp_event('model.predict')\n pred = model.predict(np.expand_dims(h5_file['image'][:], -1) / 255)\n correct = np.sum(pred.argmax(axis=1) == h5_file['label'][:])\n log.message(f'{correct} correct predictions for {len(pred)} images '\n f'(accuracy: {correct / len(pred) * 100:.2f}%)')\n\n # end top level\n log.ended('Running benchmark MNIST_tf_keras')", "def run_main():\n main(sys.argv)", "def main( argv = None ):\n\n if not argv: argv = sys.argv\n\n # setup command line parser\n parser = optparse.OptionParser( version = \"%prog version: $Id$\",\n usage = globals()[\"__doc__\"] )\n\n parser.add_option( \"-p\", \"--proc\", dest=\"processors\", type=\"int\",\n help = \"use # processors [%default]\" )\n\n parser.set_defaults(\n processors = 1 )\n\n\n options, args = E.Start( parser, argv = argv )\n\n t1 = Test( RunnerGat, \n small_test_segmented_workspaces(), \n [ ValidatorNumSamples,\n ValidatorSegmentDistribution ] )\n\n t1.run( options.stdout, \n processors = options.processors )\n\n E.Stop()", "def main():\n cli_args = parse_cli_args()\n\n timbres_registry = create_timbres_registry(cli_args.presets_path)\n if cli_args.safe:\n for _, timbre_spec in timbres_registry.items():\n validate_timbre_spec(timbre_spec)\n\n if cli_args.config_path is None:\n config = resource_string(__name__, 'default_config.yml')\n settings = yaml.safe_load(config)\n else:\n with open(cli_args.config_path) as config_file:\n settings = yaml.safe_load(config_file)\n settings['timbres_registry'] = timbres_registry\n\n timeline = 
convert_tsv_to_timeline(cli_args.input_path, settings)\n write_timeline_to_wav(\n cli_args.output_path,\n timeline,\n settings['frame_rate']\n )", "def main():\n args = get_args()\n\n src_dir = args.input\n\n if os.path.exists(args.output):\n print(\"output directory already exists\")\n sys.exit(1)\n os.makedirs(args.output)\n copy_submission_dir(args.input, args.output, args.submitter)\n src_dir = args.output\n\n config = checker.Config(\n args.version,\n args.extra_model_benchmark_map)\n\n if not args.nodelete_empty_dirs:\n delete_empty_dirs(os.path.join(src_dir))\n\n os.chdir(src_dir)\n\n infer_scenario_results(args.submitter, args.noinfer_low_accuracy_results, config)\n\n return 0", "def run():\n\n call_args = sys.argv[1:]\n main(call_args)", "def BenchArgs(data_file):\n return ['--timers', 'wcg', '--logFile', data_file]", "def main():\n run_test_all()", "def main():\n parser = argparse.ArgumentParser(description=main.__doc__)\n parser.add_argument('--maxsteps', type=int, default=100000)\n parser.add_argument('--seed', type=int, default=0)\n parser.add_argument('--savefile', type=str, required=True)\n nproc = max(cpu_count() - 1, 1)\n parser.add_argument('--maxprocs', type=int, default=nproc)\n args = parser.parse_args()\n\n seed = args.seed\n np.random.seed(seed)\n venv = gen_vectorized_pong_env(args.maxprocs)\n policy = create_random_policy(venv)\n\n num_timesteps = 0\n paths = []\n while num_timesteps < args.maxsteps:\n print('{: 10d} of {: 10d} steps'.format(\n num_timesteps, args.maxsteps))\n new_paths = vsample(venv, policy)\n paths += new_paths\n num_timesteps += sum(len(path.obs) for path in new_paths)\n\n dataset = Dataset.from_paths(venv, paths)\n print('Generated', len(dataset.obs), 'timesteps total')\n dataset.save(args.savefile)", "def main():\n sys.exit(RBExt().run(sys.argv[1:]))", "def _run_benchmark(shell, shell_args, name, app, duration_seconds, measurements,\n verbose, android, save_traces):\n timeout = duration_seconds + _EXTRA_TIMEOUT\n benchmark_args = []\n benchmark_args.append('--app=' + app)\n benchmark_args.append('--duration=' + str(duration_seconds))\n\n output_file = None\n device_output_file = None\n if save_traces:\n output_file = 'benchmark-%s-%s.trace' % (name.replace(' ', '_'),\n time.strftime('%Y%m%d%H%M%S'))\n if android:\n device_output_file = os.path.join(shell.get_tmp_dir_path(), output_file)\n benchmark_args.append('--trace-output=' + device_output_file)\n else:\n benchmark_args.append('--trace-output=' + output_file)\n\n for measurement in measurements:\n benchmark_args.append(measurement)\n\n shell_args = list(shell_args)\n shell_args.append(_BENCHMARK_APP)\n shell_args.append('--force-offline-by-default')\n shell_args.append('--args-for=%s %s' % (_BENCHMARK_APP,\n ' '.join(benchmark_args)))\n\n if verbose:\n print 'shell arguments: ' + str(shell_args)\n return_code, output, did_time_out = shell.run_and_get_output(\n shell_args, timeout=timeout)\n\n if did_time_out:\n return False, 'timed out', output\n if return_code:\n return False, 'return code: ' + str(return_code), output\n\n # Pull the trace file even if some measurements are missing, as it can be\n # useful in debugging.\n if device_output_file:\n shell.pull_file(device_output_file, output_file, remove_original=True)\n\n return True, None, output", "def main():\n data_loader = TinyPerformanceLoader()\n data_loader.archive_corpus()", "def main(argv):\n\n output_filename = ''\n input_filename = ''\n langCode = 'en'\n language = False\n\n # add support for default (en) language\n language = 
gettext.translation(\n 'webperf-core', localedir='locales', languages=[langCode])\n language.install()\n _ = language.gettext\n\n try:\n opts, args = getopt.getopt(\n argv, \"hi:o:\", [\"help\", \"input=\", \"output=\"])\n except getopt.GetoptError:\n print(main.__doc__)\n sys.exit(2)\n\n if (opts.__len__() == 0):\n print(main.__doc__)\n sys.exit(2)\n\n for opt, arg in opts:\n if opt in ('-h', '--help'): # help\n print(main.__doc__)\n sys.exit(2)\n elif opt in (\"-i\", \"--input\"): # input file path\n input_filename = arg\n\n file_ending = \"\"\n file_long_ending = \"\"\n if (len(input_filename) > 4):\n file_ending = input_filename[-4:].lower()\n if (len(input_filename) > 7):\n file_long_ending = input_filename[-7:].lower()\n\n if file_long_ending == \".sqlite\":\n from engines.sqlite import read_sites, add_site, delete_site\n elif (file_ending == \".csv\"):\n from engines.csv import read_sites, add_site, delete_site\n elif (file_ending == \".xml\"): # https://example.com/sitemap.xml\n from engines.sitemap import read_sites, add_site, delete_site\n else:\n from engines.json import read_tests, read_sites, add_site, delete_site\n pass\n elif opt in (\"-o\", \"--output\"): # output file path\n output_filename = arg\n pass\n\n tests = read_tests(input_filename, 0, -1)\n generated_date = False\n co2s = list()\n\n for test in tests:\n if not generated_date:\n generated_date = datetime.fromisoformat(\n test[FIELD_INDEX_DATE]).strftime('%Y-%m-%d')\n\n str_data = test[FIELD_INDEX_DATA].replace('\\'', '\"')\n data = json.loads(str_data)\n print(str_data)\n co2s.append(data['co2'])\n\n if not generated_date:\n generated_date = datetime.today().strftime('%Y-%m-%d')\n\n output_content = \"# This array was last generated with carbon-rating.py on {0}\\n\".format(\n generated_date)\n output_content += \"def get_generated_date():\\n\"\n output_content += \"\\treturn '{0}'\\n\".format(\n generated_date)\n output_content += \"\\n\"\n output_content += \"def get_percentiles():\\n\"\n output_content += \"\\treturn [\\n\"\n\n co2s_sorted = sorted(co2s)\n\n intervals = list()\n\n index = 1\n while (index <= 100):\n percentile = getPercentile(co2s_sorted, index)\n intervals.append(percentile)\n position = index - 1\n if index < 100:\n if position % 10 == 0 and position != 0:\n output_content += \"\\t\\t# {0} percentile\\n\".format(position)\n\n output_content += \"\\t\\t{0},\\n\".format(percentile)\n else:\n output_content += \"\\t\\t{0}\\n\".format(percentile)\n index += 1\n\n output_content += \"\\t]\"\n\n print(output_content)\n if (len(output_filename) > 0):\n write(output_filename, output_content)", "def run_benchmark(curl, benchmark, test_config = TestConfig()):\n\n warmup_runs = benchmark.warmup_runs\n benchmark_runs = benchmark.benchmark_runs\n message = '' #Message is name of benchmark... 
print it?\n\n if (warmup_runs <= 0):\n raise Exception(\"Invalid number of warmup runs, must be > 0 :\" + warmup_runs)\n if (benchmark_runs <= 0):\n raise Exception(\"Invalid number of benchmark runs, must be > 0 :\" + benchmark_runs)\n\n #Initialize variables to store output\n output = BenchmarkResult()\n output.name = benchmark.name\n output.group = benchmark.group\n metricnames = list(benchmark.metrics)\n metricvalues = [METRICS[name] for name in metricnames] # Metric variable for curl, to avoid hash lookup for every metric name\n results = [list() for x in xrange(0, len(metricnames))] # Initialize arrays to store results for each metric\n\n curl.setopt(pycurl.WRITEFUNCTION, lambda x: None) #Do not store actual response body at all.\n\n #Benchmark warm-up to allow for caching, JIT compiling, on client\n logging.info('Warmup: ' + message + ' started')\n for x in xrange(0, warmup_runs):\n if benchmark.method == u'POST' or benchmark.method == u'PUT':\n curl.setopt(curl.READFUNCTION, StringIO.StringIO(benchmark.body).read)\n curl.perform()\n logging.info('Warmup: ' + message + ' finished')\n\n logging.info('Benchmark: ' + message + ' starting')\n\n for x in xrange(0, benchmark_runs): # Run the actual benchmarks\n if benchmark.method == u'POST' or benchmark.method == u'PUT':\n curl.setopt(curl.READFUNCTION, StringIO.StringIO(benchmark.body).read)\n\n try: # Run the curl call, if it errors, then add to failure counts for benchmark\n curl.perform()\n except Exception:\n output.failures = output.failures + 1\n continue # Skip metrics collection\n\n # Get all metrics values for this run, and store to metric lists\n for i in xrange(0, len(metricnames)):\n results[i].append( curl.getinfo(metricvalues[i]) )\n\n logging.info('Benchmark: ' + message + ' ending')\n\n temp_results = dict()\n for i in xrange(0, len(metricnames)):\n temp_results[metricnames[i]] = results[i]\n output.results = temp_results\n\n curl.close()\n return analyze_benchmark_results(output, benchmark)", "def main():\r\n algos = [merge_sort, quick_sort, heap_sort, radix_sort, bucket_sort_general]\r\n array_sizes = [5000, 10000, 15000, 20000, 50000, 75000, 100000, 150000]\r\n results = {algo.__name__: [] for algo in algos}\r\n for algo in algos:\r\n result = []\r\n for size in array_sizes:\r\n time = test(algo, size)\r\n result.append(time)\r\n results[algo.__name__] = result\r\n\r\n display_results(results, array_sizes)", "def main():\n \n try:\n opts, _ = getopt.getopt(sys.argv[1:]\n , \"b:c:d:e:f:i:l:m:o:r:s:t:x:y:\"\n , \"default-setting=\")\n except getopt.GetoptError, err:\n print str(err) \n usage()\n sys.exit(2)\n \n directory = None\n config_of_interest = None\n bench_of_interest = None\n time_of_interest = None\n time_to_ignore = None\n bench_expectations = {}\n rep = None # bench representation algorithm\n revision_range = '0:'\n regression_range = '0:'\n latest_revision = None\n requested_height = None\n requested_width = None\n title = 'Bench graph'\n settings = {}\n default_settings = {}\n\n def parse_range(range):\n \"\"\"Takes '<old>[:<new>]' as a string and returns (old, new).\n Any revision numbers that are dependent on the latest revision number\n will be filled in based on latest_revision.\n \"\"\"\n old, _, new = range.partition(\":\")\n old = int(old)\n if old < 0:\n old += latest_revision;\n if not new:\n new = latest_revision;\n new = int(new)\n if new < 0:\n new += latest_revision;\n return (old, new)\n\n def add_setting(settings, setting):\n \"\"\"Takes <key>[=<value>] adds {key:value} or {key:True} to 
settings.\"\"\"\n name, _, value = setting.partition('=')\n if not value:\n settings[name] = True\n else:\n settings[name] = value\n\n def read_expectations(expectations, filename):\n \"\"\"Reads expectations data from file and put in expectations dict.\"\"\"\n for expectation in open(filename).readlines():\n elements = expectation.strip().split(',')\n if not elements[0] or elements[0].startswith('#'):\n continue\n if len(elements) != 5:\n raise Exception(\"Invalid expectation line format: %s\" %\n expectation)\n bench_entry = elements[0] + ',' + elements[1]\n if bench_entry in expectations:\n raise Exception(\"Dup entries for bench expectation %s\" %\n bench_entry)\n # [<Bench_BmpConfig_TimeType>,<Platform-Alg>] -> (LB, UB)\n expectations[bench_entry] = (float(elements[-2]),\n float(elements[-1]))\n\n def check_expectations(lines, expectations, newest_revision, key_suffix):\n \"\"\"Check if there are benches in latest rev outside expected range.\"\"\"\n exceptions = []\n for line in lines:\n line_str = str(line)\n bench_platform_key = (line_str[ : line_str.find('_{')] + ',' +\n key_suffix)\n this_revision, this_bench_value = lines[line][-1]\n if (this_revision != newest_revision or\n bench_platform_key not in expectations):\n # Skip benches without value for latest revision.\n continue\n this_min, this_max = expectations[bench_platform_key]\n if this_bench_value < this_min or this_bench_value > this_max:\n exceptions.append('Bench %s value %s out of range [%s, %s].' %\n (bench_platform_key, this_bench_value, this_min, this_max))\n if exceptions:\n raise Exception('Bench values out of range:\\n' +\n '\\n'.join(exceptions))\n\n try:\n for option, value in opts:\n if option == \"-b\":\n bench_of_interest = value\n elif option == \"-c\":\n config_of_interest = value\n elif option == \"-d\":\n directory = value\n elif option == \"-e\":\n read_expectations(bench_expectations, value)\n elif option == \"-f\":\n regression_range = value\n elif option == \"-i\":\n time_to_ignore = value\n elif option == \"-l\":\n title = value\n elif option == \"-m\":\n rep = value\n elif option == \"-o\":\n redirect_stdout(value)\n elif option == \"-r\":\n revision_range = value\n elif option == \"-s\":\n add_setting(settings, value)\n elif option == \"-t\":\n time_of_interest = value\n elif option == \"-x\":\n requested_width = int(value)\n elif option == \"-y\":\n requested_height = int(value)\n elif option == \"--default-setting\":\n add_setting(default_settings, value)\n else:\n usage()\n assert False, \"unhandled option\"\n except ValueError:\n usage()\n sys.exit(2)\n\n if directory is None:\n usage()\n sys.exit(2)\n\n if time_of_interest:\n time_to_ignore = None\n\n # The title flag (-l) provided in buildbot slave is in the format\n # Bench_Performance_for_Skia_<platform>, and we want to extract <platform>\n # for use in platform_and_alg to track matching benches later. 
If title flag\n # is not in this format, there may be no matching benches in the file\n # provided by the expectation_file flag (-e).\n platform_and_alg = title\n if platform_and_alg.startswith(TITLE_PREAMBLE):\n platform_and_alg = (\n platform_and_alg[TITLE_PREAMBLE_LENGTH:] + '-' + rep)\n title += ' [representation: %s]' % rep\n\n latest_revision = get_latest_revision(directory)\n oldest_revision, newest_revision = parse_range(revision_range)\n oldest_regression, newest_regression = parse_range(regression_range)\n\n unfiltered_revision_data_points = parse_dir(directory\n , default_settings\n , oldest_revision\n , newest_revision\n , rep)\n\n # Filter out any data points that are utterly bogus... make sure to report\n # that we did so later!\n (allowed_revision_data_points, ignored_revision_data_points) = filter_data_points(\n unfiltered_revision_data_points)\n\n # Update oldest_revision and newest_revision based on the data we could find\n all_revision_numbers = allowed_revision_data_points.keys()\n oldest_revision = min(all_revision_numbers)\n newest_revision = max(all_revision_numbers)\n\n lines = create_lines(allowed_revision_data_points\n , settings\n , bench_of_interest\n , config_of_interest\n , time_of_interest\n , time_to_ignore)\n\n regressions = create_regressions(lines\n , oldest_regression\n , newest_regression)\n\n output_xhtml(lines, oldest_revision, newest_revision, ignored_revision_data_points,\n regressions, requested_width, requested_height, title)\n\n check_expectations(lines, bench_expectations, newest_revision,\n platform_and_alg)", "def main(argv=sys.argv):\n log = _setup_logging()\n log.info(\"Starting {f} version {v} dataset manipulator\".format(\n f=__file__, v=__VERSION__))\n parser = get_parser()\n args = parser.parse_args()\n if args.debug:\n log.setLevel(logging.DEBUG)\n return args.func(args)\n #return main_runner_default(argv[1:], get_parser(), log)", "def _run_benchmark(self, params):\n logging.info('Running benchmark [%s]', self._get_name())\n params = benchmark_cnn.setup(params)\n bench = benchmark_cnn.BenchmarkCNN(params)\n bench.print_info()\n stats = bench.run()\n extras = {}\n extras['examples_per_sec'] = stats.get('images_per_sec')\n if 'last_average_loss' in stats:\n extras['last_average_loss'] = stats['last_average_loss']\n if 'top_1_accuracy' in stats:\n extras['top_1_accuracy'] = stats['top_1_accuracy']\n if 'top_5_accuracy' in stats:\n extras['top_5_accuracy'] = stats['top_5_accuracy']\n self.report_benchmark(\n iters=stats.get('num_steps'),\n wall_time=stats.get('average_wall_time'),\n extras=extras)", "def main():\n parser = argparse.ArgumentParser(\n description='Executes a filter from the command-line. 
Calls JVM start/stop automatically.')\n parser.add_argument(\"-j\", metavar=\"classpath\", dest=\"classpath\", help=\"additional classpath, jars/directories\")\n parser.add_argument(\"-X\", metavar=\"heap\", dest=\"heap\", help=\"max heap size for jvm, e.g., 512m\")\n parser.add_argument(\"-i\", metavar=\"input1\", dest=\"input1\", required=True, help=\"input file 1\")\n parser.add_argument(\"-o\", metavar=\"output1\", dest=\"output1\", required=True, help=\"output file 1\")\n parser.add_argument(\"-r\", metavar=\"input2\", dest=\"input2\", help=\"input file 2\")\n parser.add_argument(\"-s\", metavar=\"output2\", dest=\"output2\", help=\"output file 2\")\n parser.add_argument(\"-c\", metavar=\"classindex\", default=\"-1\", dest=\"classindex\", help=\"1-based class attribute index\")\n parser.add_argument(\"filter\", help=\"filter classname, e.g., weka.filters.AllFilter\")\n parser.add_argument(\"option\", nargs=argparse.REMAINDER, help=\"additional filter options\")\n parsed = parser.parse_args()\n if parsed.input2 is None and not parsed.output2 is None:\n raise Exception(\"No second input file provided ('-r ...')!\")\n\n jars = []\n if not parsed.classpath is None:\n jars = parsed.classpath.split(os.pathsep)\n params = []\n if not parsed.input1 is None:\n params.extend([\"-i\", parsed.input1])\n if not parsed.output1 is None:\n params.extend([\"-o\", parsed.output1])\n if not parsed.input2 is None:\n params.extend([\"-r\", parsed.input2])\n if not parsed.output2 is None:\n params.extend([\"-s\", parsed.output2])\n if not parsed.classindex is None:\n params.extend([\"-c\", parsed.classindex])\n\n jvm.start(jars, max_heap_size=parsed.heap, packages=True)\n\n logger.debug(\"Commandline: \" + utils.join_options(sys.argv[1:]))\n\n try:\n flter = Filter(parsed.filter)\n if len(parsed.option) > 0:\n flter.set_options(parsed.option)\n loader = Loader(classname=\"weka.core.converters.ArffLoader\")\n in1 = loader.load_file(parsed.input1)\n cls = parsed.classindex\n if str(parsed.classindex) == \"first\":\n cls = \"0\"\n if str(parsed.classindex) == \"last\":\n cls = str(in1.num_attributes() - 1)\n in1.set_class_index(int(cls))\n flter.set_inputformat(in1)\n out1 = flter.filter(in1)\n saver = Saver(classname=\"weka.core.converters.ArffSaver\")\n saver.save_file(out1, parsed.output1)\n if not parsed.input2 is None:\n in2 = loader.load_file(parsed.input2)\n in2.set_class_index(int(cls))\n out2 = flter.filter(in2)\n saver.save_file(out2, parsed.output2)\n except Exception, e:\n print(e)\n finally:\n jvm.stop()", "def main(argv=None):\n parser = argparse.ArgumentParser(description='Converts a measure of bytes and a measure of time into a normalized bandwidth metric')\n parser.add_argument('byte_quantity', type=float, help='The quantity of bytes to convert')\n parser.add_argument('byte_unit', type=str, help='The unit of bytes to convert from')\n parser.add_argument('time_quantity', type=float, help='The quantity of time to convert')\n parser.add_argument('time_unit', type=str, help='The unit of time to convert from')\n parser.add_argument('to_byte_unit', type=str, help='The unit of bytes to convert to')\n parser.add_argument('to_time_unit', type=str, nargs='?', default=\"s\", help='The unit of time to convert to (optional)')\n\n args = parser.parse_args()\n\n # convert provided byte quantity and units into to_unit\n converted_bytes = convert_cloud_storage_pricing.convert_bytes(\n args.byte_quantity,\n args.byte_unit,\n args.to_byte_unit)\n converted_time = convert_cloud_storage_pricing.convert_time(\n 
args.time_quantity,\n args.time_unit,\n args.to_time_unit)\n print(f\"{converted_bytes / converted_time} {args.to_byte_unit}/{args.to_time_unit}\")", "def main():\n args = parse_args()\n process_args(args)", "def run():\n # main(sys.argv[1:])\n main()", "def main(self, argv):\n\n np.random.seed(42)\n self.setup_logging()\n self.command_line(argv)\n start_time = time.time()\n\n logging.info(\"Starting Kaggle-CTMI Experiment\\n\")\n\n logging.info(\"Finding data and groundtruth...\")\n cohort = Cohort(self.shaip)\n train_cohort, test_cohort = cohort.split_cohort_train_test(0.3)\n logging.info(\"Found %d datasets\", cohort.size)\n\n if self.args.train:\n logging.info(\"Training on %d datasets...\", train_cohort.size)\n model = self.algorithm.train(train_cohort)\n Algorithm.save_model(model, self.shaip.models_dir + 'model')\n else:\n logging.info(\"Skipping training, model saved from earlier run\")\n model = self.algorithm.load_model(self.shaip.models_dir + 'model')\n\n if self.args.predict:\n logging.info(\"Prediction on %d datasets...\", test_cohort.size)\n test_predictions = self.algorithm.predict(model, test_cohort)\n else:\n logging.info(\"Skipping prediction, using predictions from earlier run\")\n # TODO: need to sort out caching of predictions\n test_predictions = None\n\n if self.args.evaluate:\n logging.info(\"Generating results to ShaipWorkspace/outputs/results/index.html...\")\n self.results.show_results(train_cohort, test_cohort,\n self.algorithm.history, test_predictions)\n\n logging.info(\"Kaggle-CTMI Experiment done in %4.1f seconds.\\n\", (time.time() - start_time))", "def main():\n vunit = vunit_pkg.VUnit.from_argv()\n vunit = map_sources(vunit)\n run_tests(vunit)", "def main():\n\n parser = ArgumentParser()\n parser.add_argument('--config', '-c', type=str, required=True, help='Path to config file')\n parser.add_argument('--input', '-i', type=str, required=True, help='Path to video')\n parser.add_argument('--snapshot_path', '-s', type=str, required=False, default='', help='Path to snapshot')\n parser.add_argument('--out_scale', type=float, default=1.0, help='Output frame scale')\n parser.add_argument('--deploy', '-d', action='store_true', help='Execute in deploy mode')\n args = parser.parse_args()\n\n assert exists(args.config)\n assert exists(args.input)\n assert exists(args.snapshot_path + '.index')\n assert args.out_scale > 0.0\n\n task_monitor = get_monitor(args.config, snapshot_path=args.snapshot_path)\n task_monitor.demo(args.input, args.out_scale, args.deploy)", "def Run(benchmark_spec):\n vms = benchmark_spec.vms\n master_vm = vms[0]\n run_command = 'cd %s && %s ./%s' % (hpcg.HPCG_DIR,\n _GetEnvironmentVars(benchmark_spec),\n RUN_SCRIPT)\n output, _ = master_vm.RobustRemoteCommand(run_command)\n return _MakeSamplesFromOutput(benchmark_spec, output)", "def main(args):\n # Results: print to console and also write to output file\n pass", "def main():\n\n # Parse arguments using `optparse'\n (opts, _) = parse_arguments(sys.argv[1:])\n\n # Initialize burnin\n (testsuites, failfast) = \\\n common.initialize(opts, TSUITES_NAMES, STALE_TSUITES_NAMES)\n testsuites = string_to_class(testsuites)\n\n # Run burnin\n # The return value denotes the success status\n return common.run_burnin(testsuites, failfast=failfast)", "def main():\n run_test_summary1a()\n run_test_summary1c()\n run_test_summary1c()", "def old_main():\n valid_formats = ('pre', 'table', 'numeric')\n from optparse import OptionParser\n parser = OptionParser()\n #parser.add_option('-v', '--verbose', 
action='store_true', dest='verbose', default=False)\n parser.add_option('-o', '--output', dest='output_filename', metavar='FILE', default='-', help='output file')\n parser.add_option('--format', dest='format', default='pre', help='one of ' + str(valid_formats))\n parser.add_option('--test', action='store_true', dest='test', default=False, help='run some unit tests')\n options, args = parser.parse_args()\n # validate the format option\n if options.format not in valid_formats:\n print 'invalid --format parameter:', options.format, 'is not in', valid_formats\n return\n # run a test or run a demo\n if options.output_filename == '-':\n out = sys.stdout\n else:\n out = open(options.output_filename, 'w')\n run(out, options.format)\n if out is not sys.stdout:\n out.close()", "def main():\n parser = argparse.ArgumentParser(\n description='A testbench for the Google Cloud C++ Client Library')\n parser.add_argument('--host', default='localhost',\n help='The listening port')\n parser.add_argument('--port', help='The listening port')\n # By default we do not turn on the debugging. This typically runs inside a\n # Docker image, with a uid that has not entry in /etc/passwd, and the\n # werkzeug debugger crashes in that environment (as it should probably).\n parser.add_argument('--debug', help='Use the WSGI debugger',\n default=False, action='store_true')\n arguments = parser.parse_args()\n\n # Compose the different WSGI applications.\n application = wsgi.DispatcherMiddleware(root, {\n '/httpbin': httpbin.app,\n GCS_HANDLER_PATH: gcs,\n UPLOAD_HANDLER_PATH: upload,\n })\n serving.run_simple(arguments.host, int(arguments.port), application,\n use_reloader=True, use_debugger=arguments.debug,\n use_evalex=True)", "def main():\n test_merge_quick_sort()\n test_compare()", "def __main() :\n launchTests()", "def main():\n tester = Tester()\n # parse args, load configuration and create all required objects.\n tester.setup_experiment()\n # GO!\n tester.run_experiment()", "def main(argv=None):\n\n if argv is None:\n argv = sys.argv\n\n # setup command line parser\n parser = E.ArgumentParser(description=__doc__)\n\n parser.add_argument(\"--ID-file\", dest=\"ID_file\", type=str,\n help=\"Supply txt file with Sample Name assigned to Sequencing Number\")\n\n parser.add_argument(\"--Infile-dir\", dest=\"Infile_Dir\", type=str,\n help=\"Supply path to files to be moved\")\n\n parser.add_argument(\"--Outfile-ID-1\", dest=\"Outfile1_ID\", type=str,\n help=\"Supply identifier that can be used to move file to a particular directory e.g. CMS\")\n\n parser.add_argument(\"--Outfile-ID-2\", dest=\"Outfile2_ID\", type=str,\n help=\"Supply identifier within file name that can be used to move file to a particular directory e.g. GFU\")\n\n parser.add_argument(\"--Outfile-dir-1\", dest=\"Outfile1_Dir\", type=str,\n help=\"Supply desired directory for file containing identifier given in --Outfile-ID-1\")\n\n parser.add_argument(\"--Outfile-dir-2\", dest=\"Outfile2_Dir\", type=str,\n help=\"Supply desired directory for file containing identifier given in --Outfile-ID-2\")\n\n \n # add common options (-h/--help, ...) 
and parse command line\n (args) = E.start(parser, argv=argv)\n\n ###############################################\n ###############################################\n ############## Execute Functions ##############\n ###############################################\n ###############################################\n\n\n IDDict = CreateIDDict(args.ID_file)\n Shuttle(IDDict, args.Infile_Dir, args.Outfile1_ID, args.Outfile2_ID, args.Outfile1_Dir, args.Outfile2_Dir)\n \n \n # write footer and output benchmark information.\n E.stop()", "def main():\r\n args = getargs()\r\n dir_name = args.dir_name\r\n url = args.url\r\n fetch_junit(dir_name, url)", "def main():\n tng.api.runner()", "def main():\n args = parse_args(sys.argv[1:])\n\n if args.version:\n print(birdvoxclassify.version.version)\n return\n\n if args.quiet:\n logger_level = 30\n elif args.verbose:\n logger_level = 20\n else:\n logger_level = 25\n\n run(args.inputs,\n output_dir=args.output_dir,\n output_summary_path=args.output_summary_path,\n model_name=args.model_name,\n batch_size=args.batch_size,\n select_best_candidates=args.select_best_candidates,\n hierarchical_consistency=args.hierarchical_consistency,\n suffix=args.suffix,\n logger_level=logger_level)", "def _runner(self, classpath, main, jvm_options, args):", "def entry_point() -> int:\n return run(argv=sys.argv[1:], stdout=sys.stdout, stderr=sys.stderr)", "def Stop():\n\n if global_options.loglevel >= 1 and global_benchmark:\n t = time.time() - global_starting_time\n global_options.stdlog.write(\n \"######### Time spent in benchmarked functions #########\\n\")\n global_options.stdlog.write(\"# function\\tseconds\\tpercent\\n\")\n for key, value in global_benchmark.items():\n global_options.stdlog.write(\n \"# %s\\t%6i\\t%5.2f%%\\n\" % (key, value,\n (100.0 * float(value) / t)))\n global_options.stdlog.write(\n \"#######################################################\\n\")\n\n if global_options.loglevel >= 1:\n global_options.stdlog.write(getFooter() + \"\\n\")\n\n # close files\n if global_options.stdout != sys.stdout:\n global_options.stdout.close()\n # do not close log, otherwise error occurs in atext.py\n # if global_options.stdlog != sys.stdout:\n # global_options.stdlog.close()\n\n if global_options.stderr != sys.stderr:\n global_options.stderr.close()\n\n if global_options.timeit_file:\n\n outfile = open(global_options.timeit_file, \"a\")\n\n if global_options.timeit_header:\n outfile.write(\"\\t\".join(\n (\"name\", \"wall\", \"user\", \"sys\", \"cuser\", \"csys\",\n \"host\", \"system\", \"release\", \"machine\",\n \"start\", \"end\", \"path\", \"cmd\")) + \"\\n\")\n\n csystem, host, release, version, machine = map(str, os.uname())\n uusr, usys, c_usr, c_sys = map(lambda x: \"%5.2f\" % x, os.times()[:4])\n t_end = time.time()\n c_wall = \"%5.2f\" % (t_end - global_starting_time)\n\n if sys.argv[0] == \"run.py\":\n cmd = global_args[0]\n if len(global_args) > 1:\n cmd += \" '\" + \"' '\".join(global_args[1:]) + \"'\"\n else:\n cmd = sys.argv[0]\n\n result = \"\\t\".join((global_options.timeit_name,\n c_wall, uusr, usys, c_usr, c_sys,\n host, csystem, release, machine,\n time.asctime(time.localtime(global_starting_time)),\n time.asctime(time.localtime(t_end)),\n os.path.abspath(os.getcwd()),\n cmd)) + \"\\n\"\n\n outfile.write(result)\n outfile.close()", "def main():\n\n # Chdir into script directory so to properly resolve relative paths in configuration\n os.chdir(os.path.dirname(os.path.realpath(__file__)) + \"/\")\n\n # Disable proxy as we access localhost, both to 
avoid overhead and issues with proxy misconfiguration\n os.environ['NO_PROXY'] = '*'\n\n # Stop any GraphDB server that we previously started and is possibly still around due to script interruption/crash\n shell(f\"{cmd_graphdb} stopall\")\n\n # Generate synthetic traces, both for populating the repositories and for the {sf, sp, pf, pp} tests\n prepare_traces()\n \n # Generate central repositories (if needed)\n for size, approach in itertools.product(sizes, approaches):\n prepare_repository(size, approach)\n \n # Run experiments (if needed)\n for size, approach in itertools.product(sizes, approaches):\n run_experiments(size, approach)", "def main():\n parser = argparse.ArgumentParser(description='Implementation of the Naive Bayes and Perceptron classifiers')\n parser.add_argument('--statsmode', help='whether to gather stats or not', choices=['y','Y','N','n'], default='n')\n parser.add_argument('--classifier', help='classifier to use', choices=['BAYES', 'PERCEPTRON'], required=True)\n parser.add_argument('--mode', help='image class to test', choices=['VALIDATION', 'TEST'], default='TEST')\n parser.add_argument('--type', help='image type to train', choices=['DIGIT', 'FACE', 'MNIST'], required=True)\n parser.add_argument('--range', metavar=('START', 'END_EXCLUSIVE'), nargs=2, type=int, help='Range of data to test', default=[0, 100])\n parser.add_argument('--trainpercent', metavar='PERCENT', type=int, help='the percent of training data to use (int out of 100)', default=100, dest='percentage')\n parser.add_argument('--smoothing', type=int, help='Laplace smoothing constant (Naive Bayes)', default=2)\n parser.add_argument('--iterations', type=int, help='Number of times to iterate over training data (Perceptron)', default=5)\n parser.add_argument('--debug', help='Outputs more detailed information to stdout', action='store_true')\n parser.add_argument('--statloops', type=int, help='Number of times the classifier iterates over test data (Statistics only)', default=5)\n args = parser.parse_args()\n # image_type = ImageType.DIGIT if args.type == 'DIGIT' else ImageType.FACE\n image_type = None\n if args.type == 'DIGIT':\n image_type = ImageType.DIGIT\n elif args.type == 'FACE':\n image_type = ImageType.FACE\n else:\n image_type = ImageType.MNIST\n mode = Mode.TEST if args.mode == 'TEST' else Mode.VALIDATION\n if args.statsmode == 'y' or args.statsmode == 'Y':\n run_percentages_classifier(args.classifier, image_type, args)\n else:\n run = run_classifier_bayes if args.classifier == 'BAYES' else run_classifier_perceptron\n run(mode, image_type, args)", "def _auto_run(args):\n\n # TDH (2020-01-13) For developement testing the following section\n # replicates the functionality of \"standard_analysis.py\" so that\n # json_results can be created and used to create the graph image\n # files.\n import benchmark_postprocessing as bmpp\n file_list = bmpp.get_benchmark_files(args.benchmark_results_dir)\n json_results = bmpp.parse_files(file_list)\n json_results = bmpp.parse_and_add_benchmark_metadata(json_results)\n run_id_list = get_unique_run_ids(json_results)\n\n # TDH (2020-01-13) - Create unqiue reports for each run ID found.\n # Even a single results directory can contain results from multiple\n # run IDs.\n for run_id in run_id_list:\n output_path = os.path.join(\n args.benchmark_results_dir,\n '{}_report'.format(run_id))\n\n # TDH: Thorough attempt to safely create the results directory and\n # provide good error reporting if something went wrong.\n try:\n os.mkdir(output_path)\n except OSError:\n 
logging.error('Failed to create directory for report at {}'.format(\n output_path))\n create_standard_analysis_report(output_path,\n json_results,\n run_id)", "def main() -> None:", "def main() -> None:", "def main() -> None:", "def main() -> None:", "def main(args=None):\n args = args or sys.argv[1:]\n parser = parse_options()\n common.main_cli(experiment_parse_and_run, parser, args)", "def main(args):\n\n if 'log' in args and args['log'] is not None:\n logging.basicConfig(level=LOGGING_LEVELS.get(args['log'].lower(), logging.NOTSET))\n\n test_structure = read_test_file(args['test'])\n tests = build_testsets(args['url'], test_structure)\n\n # Override configs from command line if config set\n for t in tests:\n if 'print_bodies' in args and args['print_bodies'] is not None:\n t.config.print_bodies = safe_to_bool(args['print_bodies'])\n\n if 'interactive' in args and args['interactive'] is not None:\n t.config.interactive = safe_to_bool(args['interactive'])\n\n # Execute all testsets\n failures = execute_testsets(tests)\n\n sys.exit(failures)", "def _main(args):\n start = datetime.now()\n try:\n main.main(args, standalone_mode=False)\n except Abort:\n click.secho(\"Operation aborted!\", fg=\"yellow\", bold=True)\n except Exception as err: # pylint: disable=broad-except\n click.secho(\"Error: \" + str(err), fg=\"red\")\n return 1\n finally:\n if \"--help\" not in sys.argv:\n # display overall runtime for reference when performing update\n end = datetime.now()\n runtime = end - start\n click.secho(\"Operation complete. Total runtime: \" + humanize.naturaldelta(runtime), fg=\"green\")\n return 0", "def main(argv: Sequence[Text]) -> None:\n\n\n print(\"TODO\")", "def main() -> None:\n return", "def run_main():\n # Matching lines against a matcher function.\n matched_lines = match_file(file_names, matcher)\n\n # Will contain data sorted by file.\n binned_data = {}\n\n # Looking through the lines that were inserted into the metrics file via the metrics component.\n for key in matched_lines:\n\n # Grabbing matched lines by the file or orgination.\n buffer = matched_lines[key]\n\n # This will contain dictionaries converted from JSON.\n data = []\n\n # Loop through the collection, appending data converted from JSON entries.\n for line in buffer:\n data.append(extract_data(line))\n\n # Sort the data by file.\n binned_data[key] = sort_data(data)\n\n # Output the final results.\n generate_statistics(binned_data)\n return 0", "def main():\n # Load and parse json object from file with specific\n file_name = \"./benchmark.log\"\n doc = re.sub(\"[\\n|\\t]\", \"\", \"\".join(benchmark.read_text_file(file_name)))\n json_object = json.loads(\"\".join(doc))\n\n intervals = json_object[\"intervals\"]\n\n socket_keys = benchmark.get_socket_keys(intervals)\n\n result = benchmark.get_result_dictionary(intervals, socket_keys)\n\n print_to_csv(result, socket_keys)", "def cli(argv):\r\n argv.append(\"--exhaust-materials\")\r\n cltestbench.cli(argv)", "def main():\n parser = argparse.ArgumentParser(description=MAIN_DESCRIPTION)\n parser.add_argument('-a', '--algorithm', help=ALGORITHM_DESCRIPTION)\n parser.add_argument('-n', '--number', type=int, help=NUMBER_DESCRIPTION)\n parser.add_argument('-o', '--order', help=ORDER_DESCRIPTION)\n parser.add_argument('-s', '--size', help=SIZE_DESCRIPTION)\n args = parser.parse_args()\n try:\n if not (args.algorithm and args.number and args.order and args.size):\n raise ValueError\n create_structure()\n try:\n data = get_data(args.number, args.order, args.size)\n except IOError:\n 
data = generate_in_files(args.number, args.order, args.size)\n finally:\n alg, out, time = sorting_algorithm(data, args.algorithm)\n # generate_out_files(out, args.number)\n generate_log_file(args.algorithm, args.number, args.order,\n args.size, alg.compares, alg.moves, time)\n except (TypeError, UnboundLocalError, ValueError) as e:\n parser.print_help()" ]
[ "0.7575131", "0.736125", "0.7116626", "0.69657815", "0.6858612", "0.677702", "0.6768471", "0.6749162", "0.673816", "0.67165655", "0.6619266", "0.66101116", "0.65930367", "0.6569376", "0.65495706", "0.65495706", "0.6527374", "0.6446763", "0.6446198", "0.63803476", "0.632811", "0.632811", "0.6301287", "0.62862796", "0.62689364", "0.62689364", "0.62689364", "0.62689364", "0.62689364", "0.62689364", "0.62689364", "0.62689364", "0.6249929", "0.62472117", "0.62456286", "0.623213", "0.623213", "0.62221855", "0.62145513", "0.6209525", "0.61859804", "0.6182812", "0.6176425", "0.6174159", "0.61517024", "0.61267924", "0.6123793", "0.61221063", "0.61100817", "0.61009204", "0.60918", "0.6082367", "0.6076957", "0.6072928", "0.6070759", "0.60696685", "0.6067896", "0.6059737", "0.60579824", "0.6038099", "0.60342664", "0.6031994", "0.6029206", "0.6028169", "0.59891194", "0.59854287", "0.5973939", "0.5973353", "0.59721786", "0.597117", "0.59694105", "0.59626824", "0.5959913", "0.5959511", "0.5956461", "0.595032", "0.5946937", "0.593944", "0.5928414", "0.5925564", "0.59232116", "0.59224904", "0.59212965", "0.5911547", "0.5897088", "0.58762234", "0.58750176", "0.5871719", "0.5871719", "0.5871719", "0.5871719", "0.5862883", "0.5860028", "0.58559984", "0.5852507", "0.58517283", "0.5850786", "0.58377516", "0.5834743", "0.5830594" ]
0.6749548
7
Call make_simulations and return the first simulation.
def make_simulation(self):
    if self.skip_reference:
        self.units = 'time steps per second'
    else:
        self.units = 'calls per second'
    self.reference_sim, self.compare_sim = self.make_simulations()
    return self.reference_sim
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_simulations(self):\n pass", "def get_simulation(self, _id):\n\n simulation = self.collection.find_one({'_id': ObjectId(_id)})\n\n return simulation", "def fixture_sim():\n\tEXAMPLE_FILE_FOLDER = str(MODULE_DIR) + \"/data/nail1/\"\n\tsim = read.load_sim(EXAMPLE_FILE_FOLDER)\n\treturn sim", "def simulation_fixture():\n input_data = {\n \"Tools\": {\"ForwardEuler\": {}},\n \"Clock\": {\"start_time\": 0, \"end_time\": 1, \"dt\": 1},\n \"PhysicsModules\": {},\n \"Diagnostics\": {},\n }\n simulation = Simulation(input_data)\n simulation.prepare_simulation()\n return simulation", "def __next__(self):\n if self.count < len(self.simulation_paths):\n simulation = self.load_simulation(self.count)\n self.count += 1\n return simulation\n else:\n raise StopIteration", "def get_simulator(self) -> Game:\n return self.__sim", "def sim(self) -> Sim:\n\n return self._sim", "def run_simulation(self):\n self._data = msprime.sim_ancestry(\n recombination_rate=self.recombination_rate,\n sequence_length=self.len,\n num_replicates=self.num_replicates,\n demography=self.demographic_events,\n model=self.model,\n random_seed=self.random_seed,\n samples=self.sample_size)\n return self._data", "def sim_data(gen_params, sim_model, n_samples=None, rng=np.random):\n\n if n_samples is None:\n ps, xs = sim_data(gen_params, sim_model, n_samples=1, rng=rng)\n return ps[0], xs[0]\n\n assert n_samples > 0\n\n ps = None\n xs = None\n\n while True:\n\n # simulate parameters and data\n ps = gen_params(n_samples, rng=rng)\n xs = sim_model(ps, rng=rng)\n\n # filter out simulations that failed\n idx = [x is not None for x in xs]\n\n if not np.any(idx):\n continue\n\n if not np.all(idx):\n ps = np.stack(ps[idx])\n xs = np.stack(xs[idx])\n\n break # we'll break only when we have at least one successful simulation\n\n n_rem = n_samples - ps.shape[0]\n assert n_rem < n_samples\n\n if n_rem > 0:\n # request remaining simulations\n ps_rem, xs_rem = sim_data(gen_params, sim_model, n_rem, rng)\n ps = np.concatenate([ps, ps_rem], axis=0)\n xs = np.concatenate([xs, xs_rem], axis=0)\n\n assert ps.shape[0] == xs.shape[0] == n_samples\n\n return ps, xs", "def make_simulation(self):\n pass", "def runSimulation(num_robots, speed, width, height, min_coverage, num_trials,\n robot_type, visualize):\n #initialization of variables\n list_of_results = []\n \n #trial loop\n for i in range(num_trials):\n list_of_results.append(singleSimulation(num_robots, speed, width, height, min_coverage, robot_type, visualize))\n return list_of_results", "def get_simulation():\n simulation = ba.GISASSimulation()\n simulation.setBeamParameters(12.8*ba.angstrom, 90.0*ba.deg, 0.0*ba.deg)\n simulation.setDetector(get_kws3_detector())\n simulation.setDetectorResolutionFunction(ba.ResolutionFunction2DGaussian(5.0, 5.0))\n simulation.setBeamIntensity(1.0e-4)\n distr_1 = ba.DistributionGaussian(1.28*ba.nm, 0.1)\n simulation.addParameterDistribution(\"*/Beam/Wavelength\", distr_1, 50, 2.0, ba.RealLimits.positive())\n simulation.getOptions().setIncludeSpecular(True)\n return simulation", "def run_single(self):\n self.run_sim_time(1)", "def simulateOneTimeStep(self):\n\n self.susceptibleToInfected()\n self.infectedToRecovered()\n\n # add the new values of healthy/infected/recovered to the arrays keeping track\n SIR_t = np.array([self.getSusceptible(), self.getInfected(), self.getRecovered()])\n #update SIR time series\n self.SIR = np.concatenate([self.SIR, SIR_t[:,np.newaxis]], axis=1)\n\n # add the new snapshot of the simulation\n 
self.snapshots.append(self.getSpace().copy())", "def getSimulation(self):\r\n raise NotImplementedError()", "def simulate(self, num_games):\r\n # self.runs = num_games #Initializes a tracker for the number of runs\r\n for _ in range(num_games):\r\n self.results.append(self._simulate_once())\r\n return self.results", "def sim(self):\n return self.mujoco_simulation.sim", "def run_one_body_sampling(self):\n\n # Initialize the posistions for each new Monte Carlo run\n positions = np.random.rand(self.num_p, self.num_d)\n # Initialize the distance matrix\n self.s.positions_distances(positions)\n # check if the wave function is zero\n while True:\n test_wavefunction = self.w.wavefunction(positions)\n if test_wavefunction**2 <= 1e-14:\n # Initialize the posistions for each new Monte Carlo run\n positions = np.random.rand(self.num_p, self.num_d)\n # Initialize the distance matrix\n self.s.positions_distances(positions)\n else:\n break\n # Initialize sampler method for each new Monte Carlo run\n self.sam.initialize()\n density_adding = np.zeros(41)\n\n # Run Metropolis while finding one body density\n for i in range(self.mc_cycles):\n new_positions = self.metropolis_step(positions)\n positions = new_positions\n density = self.one_body_density(positions)\n density_adding += density\n # self.sam.sample_values(positions)\n\n # self.sam.average_values(self.mc_cycles)\n # self.print_averages()\n\n return density_adding", "def run_default(self):\n sim_run = self._runModel()\n\n self.default_runs.append(sim_run)\n\n return sim_run", "def create_new_sim(save_file):\n sim = simulation.Simulation(save_file, 10)\n return sim", "def sim(self):\n return self._sim", "def simulate(self):\n return self._simulate", "def make_sims(self):\n self.sims = [Simulation(conf=c) for c in self.sim_confs]", "def get_last_run_dynamics_simulator(self) -> tudatpy.kernel.simulation.propagation_setup.SingleArcDynamicsSimulator:\n return self.dynamics_simulator", "def runSimulations(self):\n oldConditionData = self.oldSim.simulate()\n newConditionData = self.newSim.simulate()\n return (oldConditionData, newConditionData)", "def pick_one(self):\n index = 0\n r = random.random()\n while r >= 0:\n r = r - self.normalised_fitness[index]\n index += 1\n index -= 1\n return self.population[index]", "def run_simulation(self, num_games=10):\n for _ in range(num_games):\n self.result.append(self.single_game())", "def sr_sim_data(self, sim_name=None, sim_type=None):\n from pykern import pkunit\n from pykern.pkdebug import pkdpretty\n\n self.sr_sim_type_set(sim_type)\n\n if not sim_name:\n sim_name = \"Scooby Doo\"\n d = self.sr_post(\n \"listSimulations\",\n PKDict(\n simulationType=self.sr_sim_type,\n search=PKDict({\"simulation.name\": sim_name}),\n ),\n )\n assert 1 == len(d), \"listSimulations name={} returned count={}\".format(\n sim_name, len(d)\n )\n d = d[0].simulation\n res = self.sr_get_json(\n \"simulationData\",\n PKDict(\n simulation_type=self.sr_sim_type,\n pretty=\"0\",\n simulation_id=d.simulationId,\n ),\n )\n pkunit.pkeq(sim_name, res.models.simulation.name)\n return res", "def load_simulation(self, index):\n simulation_path = join(self.path, self.simulation_paths[index])\n return GrowthSimulation.load(simulation_path)", "def simulation_factory(device):\n\n def make_simulation(snapshot=None, domain_decomposition=None):\n sim = Simulation(device)\n\n # reduce sorter grid to avoid Hilbert curve overhead in unit tests\n for tuner in sim.operations.tuners:\n if isinstance(tuner, hoomd.tune.ParticleSorter):\n tuner.grid = 8\n\n if 
snapshot is not None:\n if domain_decomposition is None:\n sim.create_state_from_snapshot(snapshot)\n else:\n sim.create_state_from_snapshot(snapshot, domain_decomposition)\n sim.seed = 22765\n return sim\n\n return make_simulation", "def call(self):\n\n self.cross()\n self.mutation()\n self.selection()\n \n return self.population[0]", "def simulation():\n\n return {\n \"type\": \"class\",\n \"base\": \"iso.process_step\",\n \"is_abstract\": False,\n \"is_document\": True,\n \"pstr\": (\"({}/{}/{})\", (\"used\", \"ran_for_experiments\", \"ensemble_id\")),\n \"properties\": [\n (\n \"part_of_project\",\n \"linked_to(designing.project)\",\n \"1.N\",\n \"Project or projects for which simulation was run\",\n ),\n (\n \"ran_for_experiments\",\n \"linked_to(designing.numerical_experiment)\",\n \"1.N\",\n \"One or more experiments with which the simulation is \"\n \"associated\",\n ),\n (\n \"sub_experiment\",\n \"linked_to(designing.numerical_experiment)\",\n \"0.1\",\n \"For start-date ensembles, this will indicate the beginning \"\n \"year; for offline models driven by output from another \"\n \"model, this will provide the source_id and variant_label \"\n \"for the 'driving' model.\",\n ),\n (\n \"used\",\n \"linked_to(science.model)\",\n \"1.1\",\n \"The model used to run the simulation\",\n ),\n (\n \"primary_ensemble\",\n \"linked_to(activity.ensemble)\",\n \"0.1\",\n \"Primary Ensemble (ensemble for which this simulation was \"\n \"first run).\",\n ),\n (\n \"institution\",\n \"linked_to(shared.party)\",\n \"0.1\",\n \"institution which carried out the simulation\",\n ),\n (\n \"parent_of\",\n \"linked_to(activity.child_simulation)\",\n \"0.N\",\n \"If appropriate, links to simulations which branched from \"\n \"this one\",\n ),\n (\n \"produced\",\n \"linked_to(data.dataset)\",\n \"0.N\",\n \"Products of the simulation\",\n ),\n (\n \"had_performance\",\n \"linked_to(platform.performance)\",\n \"0.1\",\n \"Performance of the simulation.\",\n ),\n (\n \"ran_on\",\n \"linked_to(platform.machine)\",\n \"0.1\",\n \"The machine on which the simulation was run.\",\n ),\n (\n \"errata\",\n \"shared.online_resource\",\n \"0.1\",\n \"Link to errata associated with this simulation.\",\n ),\n (\n \"ensemble_id\",\n \"activity.axis_member\",\n \"0.N\",\n \"Identification within ensemble axes via axis member. \"\n \"(Multiple axis members within a simulation cannot share the \"\n \"same ensemble_axis.) (There must be an axis_member instance \"\n \"for each ensemble axis in a parent ensemble.)\",\n ),\n # Time\n (\n \"start_time\",\n \"time.date_time\",\n \"0.1\",\n \"The start date-time of the simulation. e.g. \"\n \"2012-04-01 00:00:00\",\n ),\n (\n \"end_time\",\n \"time.date_time\",\n \"0.1\",\n \"The end date-time of the simulation. e.g. 
\"\n \"2087-11-30 12:00:00\",\n ),\n (\n \"calendar\",\n \"time.calendar\",\n \"0.1\",\n \"The calendar used in the simulation\",\n ),\n # Further Info URL\n (\n \"documentation\",\n \"shared.online_resource\",\n \"0.1\",\n \"On-line location of additional documentation\",\n ),\n # Extra attributes\n (\n \"extra_attributes\",\n \"shared.extra_attribute\",\n \"0.N\",\n \"Additional attributes provided with simulation.\",\n ),\n ],\n \"constraints\": [\n (\"cardinality\", \"rationale\", \"0.0\"),\n ],\n }", "def First():\n return CheckForError(lib.Generators_Get_First())", "def simgr(self):\n return self.project.factory.simgr(self.state)", "def test_run_sim_1():\n rnd = rand.Arrivals(36, 41)\n sim.run_sim(3, 2, 5, 6, 22, rnd)", "def run_sim(mass, start, stop, sampling_rate):\n axion = Axion(mass=mass)\n return axion.do_fast_axion_sim(start,\n stop,\n sampling_rate)", "def initialise_sim(self):\n pass", "def create_all_background_simulations(self):\r\n simulations = []\r\n simulations_ids = count(1)\r\n simulations_permutations = self.create_permutations()\r\n sim_viz = SimulationViz.BACK\r\n for sim_id, permutation in zip(simulations_ids, simulations_permutations):\r\n mutation_rules = self.create_mutation_rules(permutation.mutation_rate)\r\n mutation_maker = self.create_mutation_maker(mutation_rules)\r\n simulation_stats = self.create_simulation_stats(sim_id)\r\n breeding_rules = self.create_breading_rules(permutation.elitist_candidates, permutation.elitism_rate,\r\n permutation.discard_rate)\r\n breeder = self.create_breeder(breeding_rules)\r\n simulation = self.create_simulation(sim_id, permutation.num_of_solutions, mutation_maker, simulation_stats,\r\n breeder, sim_viz)\r\n simulations.append(simulation)\r\n if len(simulations) == self.subprocesses:\r\n yield simulations \r\n simulations = []\r\n yield simulations", "def single_simulation(simulation_args):\n window = simulation_args['window']\n cycle_count = simulation_args['cycle_count']\n model_dir = simulation_args['model_dir']\n test_x = simulation_args['test_x']\n test_y = simulation_args['test_y']\n\n # The sensor domain\n domain = 0\n\n # The model's prediction horizon\n horizon = 1\n\n # The model's buffer size\n buffer = window\n\n # Create the device\n device = HapDev(buffer_size=buffer,\n network_delay=meta.network_delay,\n window=window,\n horizon=horizon)\n\n # Create a sensor\n sensor = Sensor(domain=domain,\n buffer_size=buffer,\n dataset=test_x,\n dataset_labels=test_y,\n label_counter=window)\n device.add_sensor(sensor)\n\n # Load the ML model\n custom_objects = {'BL': BL,\n 'TABL': TABL,\n 'MaxNorm': tf.keras.constraints.max_norm}\n try:\n model = load_model(Path('../Zoo/Results/runs/' + model_dir + '/model'), custom_objects=custom_objects)\n except:\n model = load_model(Path(model_dir + '/model'), custom_objects=custom_objects)\n\n device.receive_model(model)\n\n # Lists to hold simulation results\n accuracy_list = []\n predicted_labels = []\n true_labels = []\n run_times = []\n debug_times = []\n\n # Run x steps of simulation\n # for i in tqdm.tqdm(range(cycle_count), desc='Running simulation cycles'):\n for i in range(cycle_count):\n if sensor.check_end():\n break\n accuracy, predicted_label, true_label, run_time, debug_results = device.run_one_cycle(domain)\n debug_times.append(debug_results)\n accuracy_list.append(accuracy)\n predicted_labels.append(predicted_label)\n true_labels.append(true_label)\n run_times.append(run_time)\n\n model_name = PurePath(model_dir)\n results_path = '../Results/' + model_name.name\n 
Path(results_path).mkdir(parents=False, exist_ok=True)\n\n # Save the simulation data\n simulation_results = {'True_labels': true_labels,\n 'Predicted_labels': predicted_labels,\n 'Accuracy': accuracy_list,\n 'Run_times': run_times}\n\n # Save debug data\n debug_df = pd.DataFrame(debug_times)\n debug_df.to_csv(results_path + '/debug_times.csv')\n\n simulation_results_df = pd.DataFrame(simulation_results)\n simulation_results_df.to_csv((results_path + '/Results_cycles-{cycle_count}_sensorID-{sensor}.csv').format(\n cycle_count=cycle_count,\n sensor=domain\n ), index_label='Cycle')\n\n # Plot the results\n plot_simulation_history(predicted_labels, true_labels, accuracy_list, run_times, results_path, domain, cycle_count)", "def prepare_simulation(master_seed, n_populations):\n nest.ResetKernel()\n # set global kernel parameters\n nest.SetKernelStatus(\n {\"communicate_allgather\": sim.allgather,\n \"overwrite_files\": sim.overwrite_existing_files,\n \"resolution\": sim.dt,\n \"total_num_virtual_procs\": sim.n_vp})\n if sim.to_text_file:\n nest.SetKernelStatus({\"data_path\": data_path_test})\n \n # Set random seeds\n \n # PYNEST\n #nest.sli_run('0 << /rngs [%i %i] Range { rngdict/gsl_mt19937 :: exch CreateRNG } Map >> SetStatus'%(\n # master_seed, master_seed + sim.n_vp - 1))\n #nest.SetKernelStatus({\"rng_seeds\" : range(master_seed, master_seed + sim.n_vp)})\n #nest.sli_run('0 << /grng rngdict/gsl_mt19937 :: %i CreateRNG >> SetStatus'%(master_seed + sim.n_vp))\n #nest.SetKernelStatus({\"grng_seed\" : master_seed + sim.n_vp})\n #pyrngs = [np.random.RandomState(s) for s in \n # range(master_seed + sim.n_vp + 1, master_seed + 2 * sim.n_vp + 1)]\n\n # SLI VERSION\n sli_str = \"0 << \\n\"\n #sli_str += \"/rngs %i [0 %i 1 sub] add Range { rngdict/gsl_mt19937 :: exch CreateRNG } Map\\n\"%(master_seed, sim.n_vp) # local RNG, seeded\n #sli_str += \"/grng rngdict/gsl_mt19937 :: %i %i add CreateRNG\\n\"%(master_seed, sim.n_vp) # global RNG\n sli_str += \"/rng_seeds %i [0 %i 1 sub] add Range\\n\"%(master_seed, sim.n_vp) # local RNG seeds\n sli_str += \"/grng_seed %i %i add\\n\"%(master_seed, sim.n_vp) # global RNG seed\n sli_str += \">> SetStatus\"\n nest.sli_run(sli_str)\n sli_str2 = \"/script_rngs [%i]\\n\"%sim.n_vp\n sli_str2 += \"{%i add rngdict /gsl_mt19937 get exch CreateRNG } Table def\\n\"%(master_seed + sim.n_vp)\n sli_str2 += \"/normal_rdvs script_rngs { rdevdict /normal get CreateRDV } Map def\"\n nest.sli_run(sli_str2)\n pyrngs = None\n return pyrngs", "def generate_random_solution(self):\n # \"Generate random solution\"\n Individual.COUNT += 1\n if INITIALPOP == 'random':\n # Choose randomly a file in the original dataset.\n seed = random.choice(starting_seeds)\n Individual.SEEDS.add(seed)\n elif INITIALPOP == 'seeded':\n # Choose sequentially the inputs from the seed list.\n # NOTE: number of seeds should be no less than the initial population\n assert (len(starting_seeds) == POPSIZE)\n seed = starting_seeds[Individual.COUNT - 1]\n Individual.SEEDS.add(seed)\n\n digit1 = generate_digit(seed)\n digit1.is_original = True\n individual = Individual(digit1, seed)\n individual.seed = seed\n\n return individual", "def symbol_execute_always_choice_first(self):\n rst = []\n start_s = self.p.factory.blank_state(addr=self.start + 1, option=[angr.sim_options.CALLLESS])\n sm: angr.SimulationManager = self.p.factory.simulation_manager(start_s)\n\n while True:\n one_active = sm.one_active\n rst.append(one_active)\n print(one_active)\n if len(sm.active) > 0:\n sm.active = [one_active]\n if 
self.is_state_return(one_active):\n break\n sm.step(selector_func=set_callless_to_state)\n return rst", "def simulate(self, **args):\n snr = ct.c_double * 3\n self.sim_params = {**self.sim_params, **args}\n snr = snr(*self.sim_params[\"snr\"])\n dec_param = decoder_param(self.sim_params[\"earlyTerm\"], self.sim_params[\"iterations\"], self.sim_params[\"decoding\"].encode(\"utf-8\"))\n ch_param = channel_param(self.sim_params[\"seed\"], snr, self.sim_params[\"channel\"].encode(\"utf-8\"))\n sim_param = simulation_param(self.sim_params[\"threads\"], self.sim_params[\"maxFrames\"], self.sim_params[\"fec\"], \"\".encode(\"utf-8\"))\n\n def sim_thread():\n self.sim_stop_flag.value = False\n\n self.lib.argtypes = (decoder_param, channel_param, simulation_param, sim_results_t, ct.c_bool)\n self.lib.simulate(\n dec_param,\n ch_param,\n sim_param, \n ct.byref(self.sim_results_struct),\n ct.byref(self.sim_stop_flag)\n )\n \n th_sim = threading.Thread(target=sim_thread)\n th_sim.start()", "def run(sim_attr_generator):\n#TODO: clean\n#TODO: integrate analyses\n def analyze_and_save(simulation,simulation_attributes):\n#? Ugly conf file analyses integration.\n if simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving analyses for {0}.\".format(simulation_attributes.id_name),2)\n results = analyze_datas(\n simulation.result,\n simulation_attributes.analyses\n )\n plotables = ana_results_to_plotables(\n results,\n simulation_attributes.analyses\n )\n#TODO error handling for save\n analysis_save_dm(\n results,\n plotables,\n simulation_attributes.analyses,\n simulation_attributes.id_name\n )\n\n def save_simulation(simulation,simulation_attributes):\n if not simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving simulation datas of {0}.\".format(\n simulation_attributes.id_name\n ),2) \n try:\n np.save(\n simulation_attributes.id_name,\n simulation.result\n )\n except:\n raise EnvironmentError(\"Can't save data to {}.\".format(\n simulation_attributes.id_name\n ))\n\n verbose_print(\"Starting simulation run.\",1)\n for i,simulation_attributes in enumerate(sim_attr_generator):\n verbose_print(\"Starting simulation number {0}: {1}\".format(\n i,\n simulation_attributes.id_name\n ),2)\n simulation = Simulation(\n SimulationVariables(simulation_attributes)\n )\n simulation.start()\n save_simulation(simulation,simulation_attributes)\n analyze_and_save(simulation,simulation_attributes)", "def generate_simulation(self, do_precession=False, gamma=1.76e11,\n load_mu_s=False):\n\n # Mesh ----------------------------------------------------------------\n\n if self.simulation == 'experimental_sample':\n self.sim_from_image = sfi.sim_from_image(sfi.default_image)\n\n if not load_mu_s:\n self.sim_from_image.generate_magnetic_moments(mu_s=self.mu_s)\n else:\n self.sim_from_image.generate_magnetic_moments(\n load_file=load_mu_s)\n\n self.sim = self.sim_from_image.sim\n\n elif self.simulation == '2D_square':\n # A square sized hexagonal mesh\n mesh = HexagonalMesh(self.mesh_a * 0.5, self.mesh_nx, self.mesh_ny,\n # periodicity=(True, True),\n alignment='square',\n unit_length=1e-9\n )\n self.sim = Sim(mesh)\n\n # If we use polygon mesh tools, we can use a hexagon shaped mesh\n # self.sim.mu_s = self.mu_s_in_hexagon\n\n self.sim.mu_s = self.mu_s\n\n elif self.simulation == '1D_chain':\n # A 1D chain using a cuboid mesh\n mesh = CuboidMesh(dx=self.mesh_a, nx=self.mesh_nx,\n ny=1, nz=1,\n # periodicity=(True, True),\n unit_length=1e-9\n )\n self.sim = Sim(mesh)\n\n 
# If we use polygon mesh tools, we can use a hexagon shaped mesh\n # self.sim.mu_s = self.mu_s_in_hexagon\n\n self.sim.mu_s = self.mu_s\n\n self.sim.driver.do_precession = do_precession\n self.sim.driver.gamma = gamma\n\n # Interactions --------------------------------------------------------\n\n exch = UniformExchange(self.J)\n self.sim.add(exch)\n\n dmi = DMI(D=(self.D), dmi_type='interfacial')\n self.sim.add(dmi)\n\n zeeman = Zeeman((self.B[0], self.B[1], self.B[2]))\n self.sim.add(zeeman, save_field=True)\n\n if self.ku:\n # Uniaxial anisotropy along + z-axis\n self.sim.add(Anisotropy(self.ku, axis=[0, 0, 1]))\n\n if self.Demag:\n print('Using Demag!')\n self.sim.add(DemagHexagonal())\n\n # ---------------------------------------------------------------------\n\n self.hls = np.ones_like(self.sim.spin.reshape(-1, 3))\n self.rgbs = np.ones((self.sim.spin.reshape(-1, 3).shape[0], 4))", "def run_sim(self):\n \n OS = self.OpticalSystem\n TL = self.TargetList\n SU = self.SimulatedUniverse\n Obs = self.Observatory\n TK = self.TimeKeeping\n \n # TODO: start using this self.currentSep\n # set occulter separation if haveOcculter\n if OS.haveOcculter == True:\n self.currentSep = Obs.occulterSep\n \n # choose observing modes selected for detection (default marked with a flag)\n allModes = OS.observingModes\n det_modes = list(filter(lambda mode: 'imag' in mode['inst']['name'], allModes))\n # and for characterization (default is first spectro/IFS mode)\n spectroModes = list(filter(lambda mode: 'spec' in mode['inst']['name'], allModes))\n if np.any(spectroModes):\n char_modes = spectroModes\n # if no spectro mode, default char mode is first observing mode\n else:\n char_modes = [allModes[0]]\n \n # begin Survey, and loop until mission is finished\n log_begin = 'OB%s: survey beginning.'%(TK.OBnumber + 1)\n self.logger.info(log_begin)\n self.vprint(log_begin)\n t0 = time.time()\n sInd = None\n ObsNum = 0\n while not TK.mission_is_over(OS, Obs, det_modes[0]):\n \n # acquire the NEXT TARGET star index and create DRM\n old_sInd = sInd #used to save sInd if returned sInd is None\n DRM, sInd, det_intTime, waitTime, det_mode = self.next_target(sInd, det_modes)\n \n if sInd is not None:\n ObsNum += 1\n\n if OS.haveOcculter == True:\n # advance to start of observation (add slew time for selected target)\n success = TK.advanceToAbsTime(TK.currentTimeAbs.copy() + waitTime)\n \n # beginning of observation, start to populate DRM\n DRM['star_ind'] = sInd\n DRM['star_name'] = TL.Name[sInd]\n DRM['arrival_time'] = TK.currentTimeNorm.copy().to('day')\n DRM['OB_nb'] = TK.OBnumber\n DRM['ObsNum'] = ObsNum\n pInds = np.where(SU.plan2star == sInd)[0]\n DRM['plan_inds'] = pInds.astype(int)\n log_obs = (' Observation #%s, star ind %s (of %s) with %s planet(s), ' \\\n + 'mission time at Obs start: %s')%(ObsNum, sInd, TL.nStars, len(pInds), \n TK.currentTimeNorm.to('day').copy().round(2))\n self.logger.info(log_obs)\n self.vprint(log_obs)\n\n # PERFORM DETECTION and populate revisit list attribute\n DRM['det_info'] = []\n detected, det_fZ, det_systemParams, det_SNR, FA = \\\n self.observation_detection(sInd, det_intTime, det_mode)\n # update the occulter wet mass\n if OS.haveOcculter == True:\n DRM = self.update_occulter_mass(DRM, sInd, det_intTime, 'det')\n det_data = {}\n det_data['det_status'] = detected\n det_data['det_SNR'] = det_SNR\n det_data['det_fZ'] = det_fZ.to('1/arcsec2')\n det_data['det_params'] = det_systemParams\n det_data['det_mode'] = dict(det_mode)\n det_data['det_time'] = det_intTime.to('day')\n del 
det_data['det_mode']['inst'], det_data['det_mode']['syst']\n DRM['det_info'].append(det_data)\n\n # PERFORM CHARACTERIZATION and populate spectra list attribute\n DRM['char_info'] = []\n if char_modes[0]['SNR'] not in [0, np.inf]:\n characterized, char_fZ, char_systemParams, char_SNR, char_intTime = \\\n self.observation_characterization(sInd, char_modes)\n else:\n char_intTime = None\n lenChar = len(pInds) + 1 if True in FA else len(pInds)\n characterized = np.zeros((lenChar,len(char_modes)), dtype=float)\n char_SNR = np.zeros((lenChar,len(char_modes)), dtype=float)\n char_fZ = np.array([0./u.arcsec**2, 0./u.arcsec**2])\n char_systemParams = SU.dump_system_params(sInd)\n\n for mode_index, char_mode in enumerate(char_modes):\n char_data = {}\n assert char_intTime != 0, \"Integration time can't be 0.\"\n # update the occulter wet mass\n if OS.haveOcculter == True and char_intTime is not None:\n char_data = self.update_occulter_mass(char_data, sInd, char_intTime, 'char')\n if np.any(characterized):\n vprint(' Char. results are: {}'.format(characterized[:-1, mode_index]))\n # populate the DRM with characterization results\n char_data['char_time'] = char_intTime.to('day') if char_intTime else 0.*u.day\n char_data['char_status'] = characterized[:-1, mode_index] if FA else characterized[:,mode_index]\n char_data['char_SNR'] = char_SNR[:-1, mode_index] if FA else char_SNR[:, mode_index]\n char_data['char_fZ'] = char_fZ[mode_index].to('1/arcsec2')\n char_data['char_params'] = char_systemParams\n # populate the DRM with FA results\n char_data['FA_det_status'] = int(FA)\n char_data['FA_char_status'] = characterized[-1, mode_index] if FA else 0\n char_data['FA_char_SNR'] = char_SNR[-1] if FA else 0.\n char_data['FA_char_fEZ'] = self.lastDetected[sInd,1][-1]/u.arcsec**2 \\\n if FA else 0./u.arcsec**2\n char_data['FA_char_dMag'] = self.lastDetected[sInd,2][-1] if FA else 0.\n char_data['FA_char_WA'] = self.lastDetected[sInd,3][-1]*u.arcsec \\\n if FA else 0.*u.arcsec\n \n # populate the DRM with observation modes\n char_data['char_mode'] = dict(char_mode)\n del char_data['char_mode']['inst'], char_data['char_mode']['syst']\n DRM['char_info'].append(char_data)\n \n DRM['exoplanetObsTime'] = TK.exoplanetObsTime.copy()\n\n # append result values to self.DRM\n self.DRM.append(DRM)\n \n else:#sInd == None\n sInd = old_sInd#Retain the last observed star\n if(TK.currentTimeNorm.copy() >= TK.OBendTimes[TK.OBnumber]): # currentTime is at end of OB\n #Conditional Advance To Start of Next OB\n if not TK.mission_is_over(OS, Obs, det_mode):#as long as the mission is not over\n TK.advancetToStartOfNextOB()#Advance To Start of Next OB\n elif(waitTime is not None):\n #CASE 1: Advance specific wait time\n success = TK.advanceToAbsTime(TK.currentTimeAbs.copy() + waitTime)\n self.vprint('waitTime is not None')\n else:\n startTimes = TK.currentTimeAbs.copy() + np.zeros(TL.nStars)*u.d # Start Times of Observations\n observableTimes = Obs.calculate_observableTimes(TL,np.arange(TL.nStars),startTimes,self.koMap,self.koTimes,self.mode)[0]\n #CASE 2 If There are no observable targets for the rest of the mission\n if((observableTimes[(TK.missionFinishAbs.copy().value*u.d > observableTimes.value*u.d)*(observableTimes.value*u.d >= TK.currentTimeAbs.copy().value*u.d)].shape[0]) == 0):#Are there any stars coming out of keepout before end of mission\n self.vprint('No Observable Targets for Remainder of mission at currentTimeNorm= ' + str(TK.currentTimeNorm.copy()))\n #Manually advancing time to mission end\n TK.currentTimeNorm = 
TK.missionLife\n TK.currentTimeAbs = TK.missionFinishAbs\n else:#CASE 3 nominal wait time if at least 1 target is still in list and observable\n #TODO: ADD ADVANCE TO WHEN FZMIN OCURS\n inds1 = np.arange(TL.nStars)[observableTimes.value*u.d > TK.currentTimeAbs.copy().value*u.d]\n inds2 = np.intersect1d(self.intTimeFilterInds, inds1) #apply intTime filter\n inds3 = self.revisitFilter(inds2, TK.currentTimeNorm.copy() + self.dt_max.to(u.d)) #apply revisit Filter #NOTE this means stars you added to the revisit list \n self.vprint(\"Filtering %d stars from advanceToAbsTime\"%(TL.nStars - len(inds3)))\n oTnowToEnd = observableTimes[inds3]\n if not oTnowToEnd.value.shape[0] == 0: #there is at least one observableTime between now and the end of the mission\n tAbs = np.min(oTnowToEnd)#advance to that observable time\n else:\n tAbs = TK.missionStart + TK.missionLife#advance to end of mission\n tmpcurrentTimeNorm = TK.currentTimeNorm.copy()\n success = TK.advanceToAbsTime(tAbs)#Advance Time to this time OR start of next OB following this time\n self.vprint('No Observable Targets a currentTimeNorm= %.2f Advanced To currentTimeNorm= %.2f'%(tmpcurrentTimeNorm.to('day').value, TK.currentTimeNorm.to('day').value))\n else:#TK.mission_is_over()\n dtsim = (time.time() - t0)*u.s\n log_end = \"Mission complete: no more time available.\\n\" \\\n + \"Simulation duration: %s.\\n\"%dtsim.astype('int') \\\n + \"Results stored in SurveySimulation.DRM (Design Reference Mission).\"\n self.logger.info(log_end)\n print(log_end)", "def run_simulation(self):\n env = simpy.Environment()\n env.process(self._simulation(env))\n env.run(until=24 * HORIZON)\n return self.total_cost, self.total_profit, self.number_of_courses", "def getSimFromUID(self, uid):\n print \"in getSimFromUID, simulations:\", str(self.simulations)\n if uid in self.simulations:\n return self.simulations[uid]\n else:\n return None", "def __init__(\n\t\tself, executable_sim=None, directory_template=None,\n\t\tcolumn_name_gid=\"genome_ID\", column_name_ncbi=\"NCBI_ID\", column_name_source=\"source\", separator='\\t',\n\t\tfilename_prefix=\"simulated_\", keep_original=True,\n\t\tmax_processors=1, tmp_dir=None, logfile=None, verbose=True, debug=False, seed=None):\n\t\tsuper(StrainSimulationWrapper, self).__init__(logfile, verbose)\n\t\tassert isinstance(keep_original, bool)\n\t\tassert isinstance(separator, str)\n\t\tassert isinstance(column_name_gid, str)\n\t\tassert isinstance(column_name_ncbi, str)\n\t\tassert isinstance(column_name_source, str)\n\t\tassert isinstance(filename_prefix, str)\n\t\tassert isinstance(debug, bool)\n\n\t\tif tmp_dir is None:\n\t\t\ttmp_dir = tempfile.gettempdir()\n\n\t\tself._debug = debug\n\t\tif debug:\n\t\t\tself._logger.set_level(self._logger.DEBUG)\n\n\t\tif seed is not None:\n\t\t\trandom.seed(seed)\n\t\t\tnp_random.seed(abs(hash(seed)) % 4294967295) # numpy accepts only 32 bit integers\n\n\t\tassert isinstance(max_processors, int)\n\t\tself._max_processors = max_processors\n\n\t\tself._separator = separator\n\t\tself._column_name_gid = column_name_gid\n\t\tself._column_name_ncbi = column_name_ncbi\n\t\tself._column_name_source = column_name_source\n\t\tself._filename_prefix = filename_prefix\n\t\tself._keep_original = keep_original\n\t\tself._directory_template = directory_template\n\n\t\tdirectory_sgevolver = self.get_full_path(os.path.join(os.path.dirname(__file__), \"sgEvolver\"))\n\t\tself._executable_sim = executable_sim\n\t\tif self._executable_sim is None:\n\t\t\tself._executable_sim = 
os.path.join(directory_sgevolver, \"simujobrun.pl\")\n\t\tassert self.validate_file(self._executable_sim, executable=True)\n\n\t\tif self._directory_template is None:\n\t\t\tself._directory_template = self.get_full_path(os.path.join(os.path.dirname(__file__), \"sgEvolver\", \"simulation_dir\"))\n\t\tassert self.validate_dir(self._directory_template, file_names=[self._filename_tree, self._filename_parameter])\n\n\t\tself._tmp_dir = tmp_dir\n\t\tassert self.validate_dir(self._tmp_dir)\n\n\t\tself._directory_strain = self.get_full_path(os.path.join(self._tmp_dir, \"{gid}.strains\"))\n\t\tfile_path_template_newick_tree = os.path.join(self._directory_template, self._directory_template_filenames[1])\n\t\tself._filenames_strains = self.get_filenames_strains(file_path_template_newick_tree)\n\t\tassert len(self._filenames_strains) > 0", "def test_run_sim():\n rnd = rand.Arrivals(31, 40)\n sim.run_sim(2, 1, 3, 4, 24, rnd)", "def run_sims(nsims = 10, plot = True):\n mat = init_matrix()\n init_loc = np.where(mat == 1)\n init_loc = (init_loc[0][0], init_loc[1][0])\n loc_list = [init_loc]\n\n for _ in range(nsims):\n loc_list.append(matrix_step(loc_list[-1])) # the most recent entry in the list\n # print(loc_list[-2], loc_list[-1])\n\n if plot:\n plot_matrix(loc_list)\n return(loc_list)", "def test_get_sim_interface_returns_singleton(self):\n global locator, config_paths\n locator.load_config(config_paths[0])\n sim_interface1 = locator.get_sim_interface()\n sim_interface2 = locator.get_sim_interface()\n \n self.assertEqual(sim_interface1, sim_interface2,\n 'Two subsequent calls of get_sim_interface returned different instances.')", "async def api_copySimulation(self):\n req = self.parse_post(id=True, folder=True, name=True, template=True)\n d = simulation_db.read_simulation_json(req.type, sid=req.id, qcall=self)\n d.models.simulation.pkupdate(\n name=req.name,\n folder=req.folder,\n isExample=False,\n outOfSessionSimulationId=\"\",\n )\n return self._save_new_and_reply(req, d)", "def start_background_simulation(cls, simulation):\r\n output_lines_of_one_group = simulation.play()\r\n return output_lines_of_one_group", "def get_first_sample() -> Sample:\n print(get_intro_message())\n\n user_satisfied = False\n while not user_satisfied:\n sample = get_single_sample()\n\n print(\"Sample recorded : \\\"\" + sample.string + \"\\\"\")\n user_satisfied = get_binary_validation(\n \"Do you want to keep this sample ?\", True\n )\n\n sample.impostor = False\n\n return sample", "def init_sims(self, replace=False):\n return self.docvecs.init_sims(replace=replace)", "def perform_simulation(\n simulator: Optional[Simulator] = None,\n setup: Optional[Callable[[Simulator], Simulator]] = None,\n time_step: float = 1.0,\n) -> Iterator[Timestep]:\n if simulator is None:\n simulator = initialize_simulator()\n\n if setup:\n setup(simulator)\n\n while True:\n yield simulator.step(time_step)", "def _simulate(self):\n # sample incident and update status of vehicles at new time t\n self.sim.t, self.time, type_, loc, prio, req_vehicles, func, dest = self.sim._sample_incident()\n\n self.sim._update_vehicles(self.sim.t, self.time)\n\n # sample dispatch time\n dispatch = self.sim.rsampler.sample_dispatch_time(type_)\n\n # keep track of minimum TS response time\n min_ts_response = np.inf\n\n # get target response time\n target = self.sim._get_target(type_, func, prio)\n\n # sample rest of the response time for TS vehicles\n for v in req_vehicles:\n if v == \"TS\":\n\n vehicle, estimated_time = self.sim._pick_vehicle(loc, v)\n if 
vehicle is None:\n turnout, travel, onscene, response = [np.nan]*4\n else:\n vehicle.assign_crew() # always full time in this case\n\n turnout, travel, onscene = self.sim.rsampler.sample_response_time(\n type_, loc, vehicle.current_station_name, vehicle.type, vehicle.current_crew,\n prio, estimated_time=estimated_time)\n\n response = dispatch + turnout + travel\n vehicle.dispatch(dest, self.sim.t + (response + onscene + estimated_time) / 60)\n\n # we must return a numerical value\n if np.isnan(response):\n response = self.worst_response\n\n if response < min_ts_response:\n min_ts_response = response\n\n return min_ts_response, target", "def test_calculate_class_2_individuals_best_response_simulation_all_inds_in_one():\n all_individuals_to_first = calculate_class_2_individuals_best_response(\n lambda_2=0.3,\n lambda_1_1=0.1,\n lambda_1_2=3,\n mu_1=10,\n mu_2=2,\n num_of_servers_1=8,\n num_of_servers_2=4,\n threshold_1=6,\n threshold_2=3,\n system_capacity_1=float(\"inf\"),\n system_capacity_2=float(\"inf\"),\n buffer_capacity_1=float(\"inf\"),\n buffer_capacity_2=float(\"inf\"),\n use_simulation=True,\n runtime=500,\n num_of_trials=5,\n warm_up_time=100,\n seed_num_1=10,\n seed_num_2=10,\n )\n assert all_individuals_to_first == 1\n\n all_individuals_to_second = calculate_class_2_individuals_best_response(\n lambda_2=0.3,\n lambda_1_1=3,\n lambda_1_2=0.1,\n mu_1=2,\n mu_2=10,\n num_of_servers_1=4,\n num_of_servers_2=8,\n threshold_1=3,\n threshold_2=6,\n system_capacity_1=float(\"inf\"),\n system_capacity_2=float(\"inf\"),\n buffer_capacity_1=float(\"inf\"),\n buffer_capacity_2=float(\"inf\"),\n use_simulation=True,\n runtime=500,\n num_of_trials=5,\n warm_up_time=100,\n seed_num_1=10,\n seed_num_2=10,\n )\n assert all_individuals_to_second == 0", "def create_front_simulation(self):\r\n simulations_id = 1\r\n sim_viz = SimulationViz.FRONT\r\n mutation_rules = self.create_mutation_rules(mutation_rate=0.005)\r\n mutation_maker = self.create_mutation_maker(mutation_rules)\r\n simulation_stats = self.create_simulation_stats(simulations_id)\r\n breeding_rules = self.create_breading_rules(elitist_candidates=3, elitism_rate=0.05, discard_rate=0.05)\r\n breeder = self.create_breeder(breeding_rules)\r\n num_of_solutions = 150\r\n simulation = self.create_simulation(simulations_id, num_of_solutions, mutation_maker, simulation_stats,\r\n breeder, sim_viz)\r\n return simulation", "def startSimulation(self):\n self.saveParameters()\n self.simulation.main()", "def simulation():\n#TODO: Handle errors.\n verbose_print(\"Start simulation attributes generation.\",1)\n generator = input_simulation_attributes_generator()\n verbose_print(\"Simulation attributes generated.\",1)\n Args.simulation_function(generator)", "def run(self) -> (ExtendedImage, int):\n # Generate an initial population\n print('Generating the first population...')\n self._generate_population()\n print('The first population generated! Starting the algorithm...')\n\n # Start the algorithm itself to obtain the solution for the problem\n for i in range(self._max_iter_num):\n # Also calculate execution time of 1 iteration\n start = datetime.now()\n # Run selection to generate a new population based on the prev. one\n self._selection()\n end = datetime.now()\n print(f'Iteration {i} finished; time = {(end - start).total_seconds()}')\n\n # If STOPPING_CRITERIA is reached, terminate the algorithm\n # (the best chromosome in the new generation is good enough)\n if self._population[0][1] <= self._stop_crit:\n print('Stopping criteria reached. 
Terminate.')\n break\n\n print('Algorithm finished!')\n # Return obtained result in the form of tuple (picture, fitness value)\n return self._population[0]", "def _simulate(self, sims):\n for _ in range(sims):\n current = self\n while not current.done():\n #if current.Q > 0.8 and current.turn > 20: break;\n if current.sim_flag: break # if branch fully explored, don't simulate it\n if not current.children: # if unsimulated:\n current._expand() # create child nodes for each legal move\n current._predict() # get Q and pi from network for each child\n current = current._action() # select next move with stochasticity\n if not current.sim_flag: current._backpropogate(self) # propogate new leaf\n \"\"\"\n # one simulation should not affect another. you need to parralelize this operation.\n # always update N, but hide Q and pi values\n # or maybe they shouldn't be hidden?\n # _simulate: \n # 1) lock, expand, unlock\n # 2) make threads for sim simulations \n # 3) check unlocked, then _action()\n # lock, backpropogate, unlock\n # simulations are overlapping, so you need to lock a node while updating\n # _backpropogate: lock current, update, lock next, release last\n \"\"\"", "def test_run_simulation_stores_result(self):\n sim = ss.Simulation()\n assert sim.results == []\n sim.run_simulation(10)\n assert sim.results != []\n assert len(sim.results) == 10", "def _load_simulator(self):\n path = \"/\".join([\"..\", \"backends\", \"firecommander_aa_17_ts_v0.fdsim\"])\n with open(resource_filename(__name__, path), 'rb') as fd:\n sim = pickle.load(fd)\n sim.rsampler._create_response_time_generators()\n sim.isampler.reset_time()\n sim.big_sampler._create_big_incident_generator()\n sim.set_max_target(sim.max_target)\n # filter to only TS vehicles\n rs = sim.resource_allocation.copy()\n rs[[\"RV\", \"HV\", \"WO\"]] = 0\n sim.set_resource_allocation(rs)\n return sim", "def Main():\n numberOfPopulation = 350\n numberOfDays = 60\n \n simulation = Simulation(Covid19(), numberOfPopulation, numberOfDays, \"Covid 19 Simulation\")\n simulation.run() \n simulation = Simulation(Ebola(), numberOfPopulation, numberOfDays, \"Ebola Simulation\")\n simulation.run()", "def cli_simulate(model_file, output_dir, exporter, overwrite, compression,\n confirm, progress: int, progress_tag, output_same,\n simtime_total, simtime_lims, max_sweeps, max_residual, fipy_solver,\n snapshot_interval,\n plot, video, frames, budget, resume, show_eqns):\n\n click.secho('Starting MicroBenthos simulation', fg='green')\n from microbenthos.utils import yaml\n\n click.echo('Loading model from {}'.format(model_file))\n with open(model_file, 'r') as fp:\n defs = yaml.unsafe_load(fp)\n\n if 'model' not in defs and 'domain' in defs:\n # model is not under a separate key, so insert it under \"model\"\n defs = dict(model=defs)\n\n if 'simulation' not in defs:\n defs['simulation'] = {}\n\n # we want to override the keys in the loaded simulation dictionary,\n # so that when it is created the definition stored on the instance and\n # eventually exported to file includes these user overrides\n\n sim_kwargs = dict(\n simtime_total=simtime_total,\n fipy_solver=fipy_solver,\n max_sweeps=max_sweeps,\n simtime_lims=simtime_lims,\n max_residual=max_residual,\n snapshot_interval=snapshot_interval,\n )\n for k, v in sim_kwargs.items():\n if v is None:\n continue\n else:\n defs['simulation'][k] = v\n\n if output_same:\n output_dir = str(Path(model_file).parent)\n click.secho(f'Output directory set to: {output_dir}')\n\n from microbenthos.runners import 
SimulationRunner\n runner = SimulationRunner(output_dir=output_dir,\n model=defs['model'],\n simulation=defs['simulation'],\n resume=resume,\n overwrite=overwrite,\n confirm=confirm,\n progress=progress,\n progress_tag=progress_tag,\n plot=plot,\n video=video,\n frames=frames,\n budget=budget,\n exporters=exporter,\n show_eqns=show_eqns)\n\n if not runner.get_data_exporters():\n click.secho('No data exporters defined. Adding with compression={}'.format(\n compression), fg='red')\n runner.add_exporter('model_data', output_dir=runner.output_dir,\n compression=compression)\n\n runner.run()", "def get_random_individual(self, generation):\n if len(self.generations) <= generation < 0:\n raise ValueError('Please enter a valid generation.')\n return self.get_individual(\n generation=generation,\n index=random.randint(0, len(self.generations[generation]) - 1))", "def test_single_game_returns_tuple(self):\n sim = ss.Simulation()\n assert type(sim.single_game()) == tuple, 'single_game should return ' \\\n 'tuple'", "def genetic_algorithm(self) -> Tour:\n print(\"{:*^120}\".format(\"Initialize first population\")) \n # Init first population\n pop = Population(self.population_size, self.map, initial=True)\n # print(pop)\n print(\"{:*^120}\".format(\"Initial fittes tour\")) \n print(\"Tour:\", pop.get_fittess_tour())\n print(\"Cost:\", pop.get_fittess_tour().get_cost())\n # print(\"Initial tour fitness:\", pop.get_fittess_tour().get_fitness())\n\n # Set first pop\n self.evol.setPop(pop)\n \n for i in range(self.no_generations):\n # Evolve next generation\n pop = self.evol.evolve_generation()\n \n if i % self.print_cost_per_gen == 0:\n print(\"{:-^50}Generation {}{:-^50}\".format(\"\", i, \"\"))\n print(\"Tour:\", pop.get_fittess_tour())\n print(\"Tour cost: {}\".format(pop.get_fittess_tour().get_cost()))\n\n # Set new pop\n self.evol.setPop(pop)\n\n return self.evol.getPop().get_fittess_tour()", "def init_one(self, _=0):\n some_srepr = self.simple_random_srepr()\n some_fitness, some_accuracy, some_ng = self.fitness(some_srepr)\n return some_fitness, some_accuracy, some_ng, some_srepr", "def step(self, actions):\n assert (len(actions) == len(self.simulators))\n\n data_out = {outp: self.num_simulators*[None] for outp in self.outputs}\n\n # def act(idx, s):\n # try:\n # response = s.step(actions[idx])\n # if self.simulator_type == 'room_simulator':\n # response = self._convert_observation(s, response, self.outputs) ## ATTENTION DANS DOOM ON MODIFIE DIRECTEMENT LES DATA DANS DOOM SIMULATOR\n # for outp in self.outputs:\n # data_out[outp][idx] = response[outp] # ICI LES DATA ONT LA BONNE SHAPE\n # except Exception as exc:\n # print('Exception when stepping simulator with id: ' + str(idx))\n # raise exc\n\n # with ThreadPoolExecutor(max_workers=self.num_simulators) as executor:\n # futures = []\n # for i in range(self.num_simulators):\n # future = executor.submit(act, i, self.simulators[i])\n # futures.append(future)\n # concurrent.futures.wait(futures)\n # # check if any exception\n # for f in futures:\n # f.result()\n\n data_out = {outp: [] for outp in self.outputs}\n \n for (sim, act) in zip(self.simulators, actions):\n data_one_sim = sim.step(act)\n for outp in self.outputs:\n data_out[outp].append(data_one_sim[outp])\n\n # print(data_out.keys())\n return data_out", "def run_metropolis(self):\n\n # Initialize the posistions for each new Monte Carlo run\n positions = np.random.rand(self.num_p, self.num_d)\n # Initialize the distance matrix\n self.s.positions_distances(positions)\n # check if the wave 
function is zero\n while True:\n test_wavefunction = self.w.wavefunction(positions)\n if test_wavefunction**2 <= 1e-14:\n # Initialize the posistions for each new Monte Carlo run\n positions = np.random.rand(self.num_p, self.num_d)\n # Initialize the distance matrix\n self.s.positions_distances(positions)\n else:\n break\n\n # Initialize sampler method for each new Monte Carlo run\n self.sam.initialize()\n\n for i in range(self.mc_cycles):\n new_positions = self.metropolis_step(positions)\n positions = new_positions\n self.sam.sample_values(positions)\n\n self.sam.average_values(self.mc_cycles)\n energy = self.sam.local_energy\n d_El = self.sam.derivative_energy\n var = self.sam.variance\n self.print_averages()\n return d_El, energy, var", "def __call__(self, key, t=None, clumpexist=False, clump=None, sims=None):\n # Slice the simulation group is needed\n simgrp = _simgroupslice(self, sims)\n \n # No simulations were selected\n if len(simgrp) < 1:\n \n return None\n \n # Loop through all the simulations at generate the request arrays\n outlist = []\n \n for iSim, s in enumerate(simgrp):\n \n if len(s) > 0:\n # The simulation has a clump. Now generate the requested array\n val = s(key,t,clumpexist,clump)\n \n else:\n # The simulation has no clumps\n val = None\n \n # Append output (if it's not None)\n if val is not None:\n \n if len(val) > 0:\n \n outlist.append(val)\n \n # Concatenate the list of output arrays into a single SimArray\n outarray = arraylistcat(outlist)\n \n return outarray", "def get_sim_funs_for_matching():\n return get_sim_funs()", "def initialize_simulation(self) -> Simulation:\n pass", "def sample_simulation() -> Dict[str, Tuple[str, float]]:\n sim = Simulation('stations.json', 'sample_rides.csv')\n sim.run(datetime(2017, 6, 1, 8, 0, 0),\n datetime(2017, 6, 1, 9, 0, 0))\n\n return sim.calculate_statistics()", "def simulation_loop_satisficing(df_exp, df_model, n_subj, n_sim=10):\n\n # Cycle over simulations\n for i in range(0, n_sim):\n\n # Simulate the data\n sim_est_err, sim_pers_prob, df_sim, _ = simulation_satisficing(df_exp, df_model, n_subj)\n\n # Put all data in data frames for estimation errors and perseveration\n # Also concatenate all data\n if i == 0:\n all_sim_pers = sim_pers_prob.copy()\n all_sim_est_errs = sim_est_err.copy()\n all_data = df_sim.copy()\n else:\n all_sim_pers = pd.concat([all_sim_pers, sim_pers_prob])\n all_sim_est_errs = pd.concat([all_sim_est_errs, sim_est_err])\n all_data = pd.concat([all_data, df_sim])\n\n return all_sim_pers, all_sim_est_errs, all_data", "def simulate(self):\n\n # simulate all patients\n for patient in self._patients:\n patient.simulate(Data.SIM_LENGTH)\n\n # return the cohort outputs\n return CohortOutputs(self)", "def test_run_simulation_returns_nothing(self):\n sim = ss.Simulation()\n assert sim.run_simulation(10) is None", "def simulate(\n self,\n parameters: Union[Sequence[Union[int, float]], ArrayLike1D],\n nobs: int,\n rng: RNGType,\n burn: int = 500,\n initial_value: Optional[Union[float, NDArray]] = None,\n ) -> Tuple[NDArray, NDArray]:", "def simulate(): \n \n # Create tmpdir to hold all steerfiles and log files \n SimObj = Simulation(steerfiles=steerfiles, name=os.path.splitext(os.path.basename(rawfile_alu))[0] + '-sim' )\n\n # Set Beam energy\n SimObj.set_beam_momentum(beamenergy)\n\n # Create steerfiles for processing\n simpath = create_sim_path_air(SimObj)\n\n # Get gearfile\n localgearfile = SimObj.get_filename('gear.xml')\n\n # Misalign gear file\n randomize_telescope(gearfile=localgearfile, 
mean_list=mean_list, sigma_list=sigma_list, sensorexception_list=sensorexception_list, modeexception_list=modeexception_list)\n\n localtruthdb_filename=SimObj.create_dbfilename(truthdb_filename)\n\n # Convert gear file to alignmentDB root file, which will be stored in the sim folder\n Create_AlignmentDBFile_From_Gear(gearfile=SimObj.get_filename('gear.xml'), truthdbfilename=localtruthdb_filename)\n\n # Copy gearfile\n SimObj.copy_file('gear.xml','gear_air.xml')\n\n # Get air gearfile\n gearfile_air = SimObj.get_filename('gear_air.xml')\n\n # Change DUT in copied gearfile\n set_parameter(gearfile=gearfile_air, sensorID=11, parametername='thickness', value=0.0001)\n set_parameter(gearfile=gearfile_air, sensorID=11, parametername='radLength', value=304000.0)\n\n\n # Create caltag for the truthdb\n localcaltag = os.path.splitext(os.path.basename(rawfile_air))[0] + '-test'\n simcaltag=localcaltag+ '-truthdb'\n\n # Run simulation to create rawfile with simulated digits \n SimObj.simulate(path=simpath,caltag=simcaltag)", "def replicate(self,simulation_run):\n\n return self._runModel(params=simulation_run.params)", "def run(self, initialPopulation = None):\n\t\tpprint(\"OPT calculating initial population...\", BLUE, self.printing)\n\t\t\n\t\tif initialPopulation == None:\n\t\t\t# if we don't get an initial set of schedules as the initial population,\n\t\t\t# then we need to generate one.\n\t\t\tpopulation = self.initialPopulation()\n\t\telse:\n\t\t\t# if we do get an initial population as input, then we just need to \n\t\t\t# calculate the fitnesses of the schedules in it.\n\t\t\tfor p in initialPopulation:\n\t\t\t\tself.calcIndividualFitness(p)\n\t\t\t# if the population is too small or too large (less than or larger than\n\t\t\t# self.populationSize) then this will fix that for us.\n\t\t\tpopulation = self.mutatePopulation(initialPopulation)\n\t\t\n\t\t# go through the needed number of iterations and mutate the population\n\t\t# everytime, this will keep the best individuals and will return the \n\t\t# best population achieved at the end.\n\t\tfor i in range(self.iterations):\n\t\t\tpprint(\"OPT iteration number %s\" % (i + 1), BLUE, self.printing)\n\t\t\tpopulation = self.mutatePopulation(population)\n\t\treturn population", "def simulate(self):\n # Simulate the testbench\n if len(self.generics.keys()) == 0:\n log.warning(\n 'No generics are supplied by this test case, if the ' +\n 'testbench uses generics' +\n ' they will assume their default values.'\n )\n\n if self._simulator is None or not self._simulator.installed:\n name = None if self._simulator is None else self._simulator.name\n raise EnvironmentError(\n \"Test aborted, {0} is not available.\".format(\n name\n )\n )\n\n ret_val, stdout, stderr = self._simulator.simulate(\n library=self.library,\n entity=self.entity,\n includes=self._simulation_libraries,\n duration=self.duration,\n generics=self.generics,\n gui=False\n )\n return (ret_val, stdout, stderr)", "def sim12_g_simulation(datafiles, simulation_mag_zeropoint, simulation_exposure):\n return stuff.Simulation(datafiles / 'sim12' / 'sim12_g.list', simulation_mag_zeropoint, simulation_exposure)", "def get_simulations(self, num_simulations):\n c = self.collection\n cursor = c.find().sort('_id', direction=-1).limit(num_simulations)\n simulations = []\n\n for s in cursor:\n s['date'] = s['_id'].generation_time\n simulations.append(s)\n\n return simulations", "def get_batched_simulator(simulator: Callable) -> Callable:\n\n # XXX: this should be handled with more care, e.g., enable 
multiprocessing\n # XXX: with Pool() as p: p.map(...)\n def batched_simulator(thetas: Tensor) -> Tensor:\n # use map to get data for every theta in batch\n # use stack to collect list of tensors in tensor\n assert (\n thetas.ndim > 1\n ), f\"batch simulator needs batch dimension. shape: {thetas.shape}\"\n return torch.stack(list(map(simulator, thetas)))\n\n return batched_simulator", "def build_and_run_(spec):\n opt = spec['opt']\n print \"pool starting \", opt\n\n # lenght of simulation \n tf = float(opt.get('tf', 100))\n\n # model # coupling function # connectivity \n simargs = {}\n for mod, key in [(models, 'model'), \n (connectivity, 'connectivity'),\n (coupling, 'coupling')]:\n simargs[key] = build_sim_part(mod, opt[key])\n\n # noise # integrator \n optint = opt['integrator']\n if 'noise' in optint:\n optint['noise'] = build_sim_part(noise, optint['noise'])\n simargs['integrator'] = build_sim_part(integrators, optint)\n\n # monitors \n if not type(opt['monitors']) in (list,):\n opt['monitors'] = [opt['monitors']]\n simargs['monitors'] = []\n for mon in opt['monitors']:\n simargs['monitors'].append(build_sim_part(monitors, mon))\n\n # stimulus \n # NotImplemented\n\n # simulator \n sim = simulator.Simulator(**simargs)\n sim.configure()\n\n # TODO open HDF5 first, figure out correct sizes, etc\n\n # loop, writing data to h5\n ts = [[] for _ in opt['monitors']]\n ys = [[] for _ in opt['monitors']]\n for i, all_monitor_data in enumerate(sim(tf)):\n for j, mondata in enumerate(all_monitor_data):\n if not mondata is None:\n t, y = mondata\n ts[j].append(t)\n ys[j].append(y)\n\n # write data to hdf5 file\n path = os.path.abspath(opt.get('wd', './'))\n h5fname = os.path.join(path, \"tvb_%s.h5\" % (spec['md5sum'], ))\n h5 = h5py.File(h5fname, 'w')\n\n for i, (mon, (t, y)) in enumerate(zip(simargs['monitors'], zip(ts, ys))):\n mname = \"mon_%d_%s\" % (i, mon.__class__.__name__)\n g = h5.create_group(mname)\n g.create_dataset('ts', data=t)\n g.create_dataset('ys', data=y)\n\n h5.close()\n\n # return filename\n print \"pool finished\", opt\n return h5fname", "def plain_sim():\n return BioSim(island_map=\"WWWW\\nWLHW\\nWWWW\",\n ini_pop=[],\n seed=1)", "def runSimulation(numSteps):\r\n rabbit_populations = []\r\n fox_populations = []\r\n for step in range(numSteps):\r\n rabbitGrowth()\r\n foxGrowth()\r\n rabbit_populations.append(CURRENTRABBITPOP)\r\n fox_populations.append(CURRENTFOXPOP)\r\n return (rabbit_populations, fox_populations)", "def run_simulation(self):\n self.market.market_session(True)\n return \"\"", "def sim12_r_simulation(datafiles, simulation_mag_zeropoint, simulation_exposure):\n return stuff.Simulation(datafiles / 'sim12' / 'sim12_r.list', simulation_mag_zeropoint, simulation_exposure)", "def simulate(self):\n score = [0 for _ in range(N_PLAYERS)]\n self.sim.play_random_game()\n w = self.sim.winner\n if w in (0,1):\n score[w] += 1\n return np.array(score)", "def reset(self):\n# \n self.end_and_close()\n# self.sim.start()\n\n # Start the next simulation\n self.sim._model.swmm_open()\n self.sim._model.swmm_start()\n\n # get the state\n state = self._state()\n return state", "def get_simulation(\n component: ComponentOrFactory,\n port_extension: Optional[float] = 4.0,\n layer_stack: LayerStack = LAYER_STACK,\n thickness_pml: float = 1.0,\n xmargin: float = 0,\n ymargin: float = 0,\n xmargin_left: float = 0,\n xmargin_right: float = 0,\n ymargin_top: float = 0,\n ymargin_bot: float = 0,\n zmargin: float = 1.0,\n clad_material: str = \"sio2\",\n port_source_name: str = \"o1\",\n 
port_margin: float = 0.5,\n port_source_offset: float = 0.1,\n distance_source_to_monitors: float = 0.2,\n resolution: float = 50,\n wavelength_start: float = 1.50,\n wavelength_stop: float = 1.60,\n wavelength_points: int = 50,\n plot_modes: bool = False,\n num_modes: int = 2,\n run_time_ps: float = 10.0,\n dispersive: bool = False,\n material_name_to_tidy3d_index: Dict[str, float] = MATERIAL_NAME_TO_TIDY3D_INDEX,\n material_name_to_tidy3d_name: Dict[str, str] = MATERIAL_NAME_TO_TIDY3D_NAME,\n is_3d: bool = True,\n with_all_monitors: bool = False,\n) -> td.Simulation:\n component = component() if callable(component) else component\n assert isinstance(component, Component)\n\n layer_to_thickness = layer_stack.get_layer_to_thickness()\n layer_to_material = layer_stack.get_layer_to_material()\n layer_to_zmin = layer_stack.get_layer_to_zmin()\n # layer_to_sidewall_angle = layer_stack.get_layer_to_sidewall_angle()\n\n if dispersive:\n material_name_to_tidy3d = material_name_to_tidy3d_name\n else:\n material_name_to_tidy3d = material_name_to_tidy3d_index\n\n assert isinstance(\n component, Component\n ), f\"component needs to be a gf.Component, got Type {type(component)}\"\n if port_source_name not in component.ports:\n warnings.warn(\n f\"port_source_name={port_source_name} not in {component.ports.keys()}\"\n )\n port_source = component.get_ports_list(port_type=\"optical\")[0]\n port_source_name = port_source.name\n warnings.warn(f\"Selecting port_source_name={port_source_name} instead.\")\n\n component_padding = gf.add_padding_container(\n component,\n default=0,\n top=ymargin or ymargin_top,\n bottom=ymargin or ymargin_bot,\n left=xmargin or xmargin_left,\n right=xmargin or xmargin_right,\n )\n component_extended = (\n gf.components.extension.extend_ports(\n component=component_padding, length=port_extension, centered=True\n )\n if port_extension\n else component_padding\n )\n\n gf.show(component_extended)\n component_extended = component_extended.flatten()\n\n component_ref = component_padding.ref()\n component_ref.x = 0\n component_ref.y = 0\n\n clad_material_name_or_index = material_name_to_tidy3d[clad_material]\n clad = td.Structure(\n geometry=td.Box(\n size=(td.inf, td.inf, td.inf),\n center=(0, 0, 0),\n ),\n medium=get_medium(name_or_index=clad_material_name_or_index),\n )\n structures = [clad]\n\n layers_thickness = [\n layer_to_thickness[layer]\n for layer in component.get_layers()\n if layer in layer_to_thickness\n ]\n\n if len(layer_to_thickness) < 1:\n raise ValueError(f\"{component.get_layers()} not in {layer_to_thickness.keys()}\")\n\n t_core = max(layers_thickness)\n cell_thickness = (\n thickness_pml + t_core + thickness_pml + 2 * zmargin\n if is_3d\n else 1 / resolution\n )\n\n sim_size = [\n component_ref.xsize + 2 * thickness_pml,\n component_ref.ysize + 2 * thickness_pml,\n cell_thickness,\n ]\n\n for layer in component.layers:\n if layer in layer_to_thickness and layer in layer_to_material:\n thickness = layer_to_thickness[layer]\n zmin = layer_to_zmin[layer] if is_3d else -td.inf\n zmax = zmin + thickness if is_3d else td.inf\n\n if (\n layer in layer_to_material\n and layer_to_material[layer] in material_name_to_tidy3d\n ):\n name_or_index = material_name_to_tidy3d[layer_to_material[layer]]\n medium = get_medium(name_or_index=name_or_index)\n index = get_index(name_or_index=name_or_index)\n logger.debug(\n f\"Add {layer}, {name_or_index!r}, index = {index:.3f}, \"\n f\"thickness = {thickness}, zmin = {zmin}, zmax = {zmax}\"\n )\n\n polygons = td.PolySlab.from_gds(\n 
gds_cell=component_extended,\n gds_layer=layer[0],\n gds_dtype=layer[1],\n axis=2,\n slab_bounds=(zmin, zmax),\n )\n\n for polygon in polygons:\n geometry = td.Structure(\n geometry=polygon,\n medium=medium,\n )\n structures.append(geometry)\n elif layer not in layer_to_material:\n logger.debug(f\"Layer {layer} not in {layer_to_material.keys()}\")\n elif layer_to_material[layer] not in material_name_to_tidy3d:\n materials = list(material_name_to_tidy3d.keys())\n logger.debug(f\"material {layer_to_material[layer]} not in {materials}\")\n\n # Add source\n port = component_ref.ports[port_source_name]\n angle = port.orientation\n width = port.width + 2 * port_margin\n size_x = width * abs(np.sin(angle * np.pi / 180))\n size_y = width * abs(np.cos(angle * np.pi / 180))\n size_x = 0 if size_x < 0.001 else size_x\n size_y = 0 if size_y < 0.001 else size_y\n size_z = cell_thickness - 2 * zmargin if is_3d else td.inf\n\n source_size = [size_x, size_y, size_z]\n source_center = port.center.tolist() + [0] # (x, y, z=0)\n\n xy_shifted = move_polar_rad_copy(\n np.array(port.center), angle=angle * np.pi / 180, length=port_source_offset\n )\n source_center_offset = xy_shifted.tolist() + [0] # (x, y, z=0)\n\n wavelengths = np.linspace(wavelength_start, wavelength_stop, wavelength_points)\n freqs = td.constants.C_0 / wavelengths\n freq0 = td.constants.C_0 / np.mean(wavelengths)\n fwidth = freq0 / 10\n\n msource = td.ModeSource(\n size=source_size,\n center=source_center,\n source_time=td.GaussianPulse(freq0=freq0, fwidth=fwidth),\n direction=\"+\",\n )\n\n # Add port monitors\n monitors = {}\n ports = sort_ports_x(sort_ports_y(component_ref.get_ports_list()))\n for port in ports:\n port_name = port.name\n angle = port.orientation\n width = port.width + 2 * port_margin\n size_x = width * abs(np.sin(angle * np.pi / 180))\n size_y = width * abs(np.cos(angle * np.pi / 180))\n size_x = 0 if size_x < 0.001 else size_x\n size_y = 0 if size_y < 0.001 else size_y\n size = (size_x, size_y, size_z)\n\n # if monitor has a source move monitor inwards\n length = -distance_source_to_monitors if port_name == port_source_name else 0\n xy_shifted = move_polar_rad_copy(\n np.array(port.center), angle=angle * np.pi / 180, length=length\n )\n center = xy_shifted.tolist() + [0] # (x, y, z=0)\n\n monitors[port_name] = td.ModeMonitor(\n center=center,\n size=size,\n freqs=freqs,\n mode_spec=td.ModeSpec(num_modes=1),\n name=port.name,\n )\n\n zcenter = (zmax + zmin) / 2 if is_3d else 0\n domain_monitor = td.FieldMonitor(\n center=[0, 0, zcenter],\n size=[sim_size[0], sim_size[1], 0] if is_3d else [td.inf, td.inf, 0],\n freqs=[freq0],\n name=\"field\",\n )\n monitors = list(monitors.values())\n monitors += [domain_monitor] if with_all_monitors else []\n\n sim = td.Simulation(\n size=sim_size,\n grid_size=3 * [1 / resolution],\n structures=structures,\n sources=[msource],\n monitors=monitors,\n run_time=20 * run_time_ps / fwidth,\n pml_layers=3 * [td.PML()] if is_3d else [td.PML(), td.PML(), None],\n )\n\n if plot_modes:\n src_plane = td.Box(center=source_center_offset, size=source_size)\n ms = td.plugins.ModeSolver(simulation=sim, plane=src_plane, freq=freq0)\n mode_spec = td.ModeSpec(num_modes=num_modes)\n modes = ms.solve(mode_spec=mode_spec)\n\n print(\n \"Effective index of computed modes: \",\n \", \".join([f\"{mode.n_eff:1.4f}\" for mode in modes]),\n )\n\n if is_3d:\n fig, axs = plt.subplots(num_modes, 2, figsize=(12, 12))\n else:\n fig, axs = plt.subplots(num_modes, 3, figsize=(12, 12))\n\n for mode_ind in 
range(num_modes):\n if is_3d:\n abs(modes[mode_ind].field_data.Ey).plot(\n x=\"y\", y=\"z\", cmap=\"magma\", ax=axs[mode_ind, 0]\n )\n abs(modes[mode_ind].field_data.Ez).plot(\n x=\"y\", y=\"z\", cmap=\"magma\", ax=axs[mode_ind, 1]\n )\n else:\n abs(modes[mode_ind].field_data.Ex).plot(ax=axs[mode_ind, 0])\n abs(modes[mode_ind].field_data.Ey).plot(ax=axs[mode_ind, 1])\n abs(modes[mode_ind].field_data.Ez).plot(ax=axs[mode_ind, 2])\n\n axs[mode_ind, 0].set_title(f\"|Ex|: mode_index={mode_ind}\")\n axs[mode_ind, 1].set_title(f\"|Ey|: mode_index={mode_ind}\")\n axs[mode_ind, 2].set_title(f\"|Ez|: mode_index={mode_ind}\")\n\n if is_3d:\n axs[mode_ind, 0].set_aspect(\"equal\")\n axs[mode_ind, 1].set_aspect(\"equal\")\n plt.show()\n return sim", "def run(self):\n for i in range(self.generations):\n log.info(f'Training population in generation {i + 1}...')\n if i == 0:\n self.create_first_generation()\n else:\n self.create_next_generation()\n log.info(f'best individual: {self.best_individual()[1]}')\n log.info(f'best individual score: {self.best_individual()[0]}')", "def start_simulation(self):\n regime_name = str(self.regime_list.item(self._current_regime_index).text())\n self.statusLabel.setText(u\"simulating {}\".format(regime_name))\n self._logger.info(u\"Simulating: {}\".format(regime_name))\n\n self.actSimulate.setDisabled(True)\n self.shortRunSimulation.setEnabled(False)\n self.shortRunRegimeBatch.setEnabled(False)\n self.actExecuteRegimes.setDisabled(True)\n self.guiProgress = QtGui.QProgressBar(self)\n self.sim.simulationProgressChanged.connect(self.guiProgress.setValue)\n self.statusBar().addWidget(self.guiProgress)\n self.runSimulation.emit()", "def runSimulation(numSteps):\n\n rabbit_pop = []\n fox_pop = [] \n \n for steps in range(numSteps):\n rabbitGrowth()\n foxGrowth()\n rabbit_pop.append(CURRENTRABBITPOP)\n fox_pop.append(CURRENTFOXPOP)\n \n return (rabbit_pop, fox_pop)" ]
[ "0.6396689", "0.63829195", "0.6235438", "0.6127556", "0.6113671", "0.6075556", "0.592617", "0.5924301", "0.5908774", "0.5887714", "0.5857056", "0.58460796", "0.58178735", "0.5798798", "0.57717663", "0.57685995", "0.5756951", "0.5755669", "0.5740458", "0.57304794", "0.57215303", "0.57015485", "0.5642847", "0.56383246", "0.5620001", "0.55962753", "0.55794346", "0.55661726", "0.5553669", "0.55251116", "0.5520781", "0.54910797", "0.548815", "0.5468782", "0.5456694", "0.54193", "0.54177", "0.5408923", "0.5398419", "0.53874576", "0.537324", "0.53725433", "0.53560024", "0.5338528", "0.5334181", "0.5325229", "0.5325156", "0.5304009", "0.5303514", "0.5297921", "0.52788645", "0.5269392", "0.5269112", "0.52646977", "0.525974", "0.52519876", "0.5249241", "0.5244243", "0.52364385", "0.5236181", "0.521914", "0.52106434", "0.52049017", "0.52013224", "0.5198933", "0.5197816", "0.51713616", "0.51669514", "0.5159013", "0.51455307", "0.5136622", "0.5131981", "0.5121153", "0.51210475", "0.51202345", "0.51116323", "0.51110315", "0.51105785", "0.5095974", "0.50871795", "0.5085443", "0.50830525", "0.5081026", "0.50750905", "0.50669056", "0.5065885", "0.5065034", "0.5063258", "0.5058114", "0.50579673", "0.50565845", "0.5054981", "0.50456035", "0.5039255", "0.50387424", "0.5036604", "0.50359803", "0.5027855", "0.5026917", "0.502567" ]
0.6316884
2
Run the benchmark for the given number of steps.
def run(self, steps):
    if not self.skip_reference:
        self.reference_sim.run(steps)
    self.compare_sim.run(steps)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self, steps = 1000):\n for step in range(steps):\n if self.is_done():\n return\n self.step()", "def run(self, steps=1000):\n for step in range(steps):\n if self.is_done():\n return\n self.step()", "def run(self, steps=1000):\n for step in range(steps):\n if self.is_done():\n return\n self.step()", "def run(self, steps):\n self.sim.run(steps)", "def run(self, step:int=0):\n if step > 0:\n _range = range(self.current_step, self.current_step + step + 1)\n else: # run forever\n _range = itertools.count(self.current_step)\n for step_num in _range:\n self.step()", "def run_one(num):\n start = time.time()\n if not config.get('radosbench'):\n benchcontext = {}\n else:\n benchcontext = copy.copy(config.get('radosbench'))\n iterations = 0\n while time.time() - start < int(config.get('time', 600)):\n log.info(\"Starting iteration %s of segment %s\"%(iterations, num))\n benchcontext['pool'] = str(num) + \"-\" + str(iterations)\n with radosbench.task(ctx, benchcontext):\n time.sleep()\n iterations += 1", "def run_trials(self, num=0):\n if num == 'all':\n self.trials_to_run = len(self.trials)\n else:\n self.trials_to_run = num\n self.vision_egg.go()", "def run(num_trials):\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.1, display=True) \n # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=num_trials) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n a.performace_report(num_trials)", "def perform_mult_steps(self, num_steps, tc, batch_size):\n\n self.sample_v()\n self.cli.empty().print(self.titlestr % (\"k\", \"objective\", *[f\"lm_{i} x constr_{i}\" for i in range(len(self.lagrange_mults))]))\n\n # perform a min step\n for s in range(num_steps):\n batch = tc.sample(batch_size, ['add_act', 'grad_ranking', 'add_pi'])\n self.perform_step(s, batch)", "def lab_run_big(character_id, time_step):\n pass", "def simulate(self, n, dt=None):\n for _ in range(n):\n self.step(dt)", "def setNumIterations(*argv):", "def execute_timesteps(self, num_timesteps, max_timesteps_per_episode=0, update_spec=None, deterministic=False):\n pass", "def run_circuit_and_measure(\n self, circuit: Circuit, n_samples: Optional[int] = None, **kwargs\n ) -> Measurements:\n self.number_of_circuits_run += 1\n self.number_of_jobs_run += 1", "def run_trials(environ, total):\n run_times = []\n\n for i in range(0, total):\n environ.run()\n run_times.append(environ.total_time)\n\n return run_times", "def setNumTimeSubSteps(*argv):", "def main(num_trials, num_actions):\n\tfor i in xrange(int(num_trials)):\n\t\ttrial(i+1, int(num_actions))", "def run_verbose(self, steps = 10):\n for step in range(steps):\n if self.is_done():\n print 'Done, stopping.'\n print self.to_string()\n return\n print self.to_string()\n self.step()", "def steps(self,num_steps):\n if self.last_sensation == TERMINAL_STATE:\n self.start_episode()\n for step in range(num_steps):\n next_sensation,reward = self.env(self.next_action)\n self.collect_data(self.last_sensation, self.next_action, reward, next_sensation)\n self.next_action = 
self.agent(next_sensation,reward)\n self.last_sensation = next_sensation\n if self.last_sensation == TERMINAL_STATE:\n self.start_episode()", "def run_burn_in(self, n_burn: int) -> None:\n for n in range(n_burn):\n self.perform_step()\n return", "def run_amount_of_ticks(self, amount):\n\t\tfor i in range(amount):\n\t\t\tself.run_tick()", "def run_test(_freq, cmd):\n for count in range(_freq):\n os.system(cmd.replace(\"result\", \"result\" + str(count + 1)))", "def run_simulation(self, num_games=10):\n for _ in range(num_games):\n self.result.append(self.single_game())", "def runner_scenario_x_times(repetitions, scenario_names, feature_files, out):\n if scenario_names is not None:\n to_test = scenario_names\n elif feature_files is not None:\n to_test = feature_files\n else:\n to_test = \"testsuite\"\n msg = (\"\\nRunning \" + str(repetitions) + \" times test(s):\\n \" \n + str(to_test) + \"\\n\")\n print(msg)\n if out:\n out_name = os.path.splitext(out)[0]\n ext = os.path.splitext(out)[1]\n for i in range(repetitions):\n print(\"Iteration number: \" + str(i+1))\n if out:\n out = out_name + \"-\" + str(i) + ext\n p = Process(target=worker_scenario, \n args=(scenario_names, feature_files, out))\n p.start()\n p.join()", "def run():\n step = 0\n while traci.simulation.getMinExpectedNumber() > 0:\n traci.simulationStep()\n step+=1\n traci.close()\n sys.stdout.flush()", "def lab_run_small(character_id, time_step):\n pass", "def run_multiple_test_cycles(self):\n # Perform as many cycles as required\n while self.args.repetitions >= 0:\n self.run_one_test_cycle()\n self.args.repetitions -= 1", "def benchmark(self):\n logger.info(self.benchmark.__doc__)\n return self.run(self.benchmark_profile())", "def run_benchmarks(urls, urlIndices, trial_number):\n path.append(os.path.join(CHROMIUM_SRC, 'tools/perf/'))\n benchmark_path = os.path.join(CHROMIUM_SRC, 'tools/perf/run_benchmark')\n output_path = 'temp'\n trial_key = 'trial{0}'.format(trial_number)\n\n cmd = ('sudo ' + benchmark_path + ' --profiler=trace telemetryBenchmarks.url{0}')\n for i in urlIndices:\n try:\n out, err, returncode = get_benchmark_result(cmd.format(i))\n timeout = False\n print 'successfully ran benchmark for url' + str(i)\n except TimeoutError:\n # Benchmark failed\n print 'Benchmark Timeout!'\n out = ''\n returncode = 1\n timeout = True\n\n failed = ['FAILED']\n if returncode != 0 or any(x in out for x in failed) or timeout:\n # If a benchmark fails, remove its corresponding wpr file, and act\n # as if it didn't exist\n # Remove from data/wpr_source\n print 'Benchmark {0} failed'.format(i)\n print 'return code is ' + str(returncode)\n print 'Out:'\n print out\n print 'Err:'\n print err\n urlName = 'url{0}_page_set_000.wpr'.format(i)\n urlpcName = 'url{0}_pc_page_set_000.wpr'.format(i)\n urlFilePath = os.path.join('data/wpr_source',urlName)\n urlpcFilePath = os.path.join('data/wpr_source',urlpcName)\n urlCmd = 'rm -f {0}'.format(urlFilePath)\n urlpcCmd = 'rm -f {0}'.format(urlpcFilePath)\n print 'Removing: {0}, {1}'.format(urlFilePath, urlpcFilePath)\n commands = [\n 'rm -f {0}'.format(urlFilePath),\n 'rm -f {0}'.format(urlpcFilePath)\n ]\n for cmdss in commands:\n p = Popen(cmdss, shell=True)\n p.wait()\n # Skip the rest of this url\n print \"Moving on!\"\n continue\n\n # Parse data\n tmp_path = 'temp/tmp_benchmark_result_json'\n with open(tmp_path, 'rb') as f:\n tmp_json = json.load(f)\n benchmark_results = tmp_json['values']\n commands = [\n 'rm -f ~/page_load_time/telemetry/temp/tmp_benchmark_result_json',\n ]\n for cmds in 
commands:\n p = Popen(cmds, shell=True)\n p.wait()\n\n output = {urls[i]: {'cold_times': {trial_key: benchmark_results}}}\n output_file = os.path.join(output_path, urlsafe_b64encode(urls[i]))\n output_file += '.' + str(trial_number)\n try:\n with open(output_file, 'w') as f:\n json.dump(output, f)\n except IOError:\n raise IOError('Unable to write to {0}'.format(output_file))\n\n\n ############### Now run for Perfect Cache file ################\n\n try:\n out, err, returncode = \\\n get_benchmark_result(cmd.format(str(i) + '_pc'))\n timeout = False\n print 'successfully ran benchmark for url' + str(i) + '_pc'\n except TimeoutError:\n # Benchmark failed\n print 'Benchmark Timeout!'\n out = ''\n returncode = 1\n timeout = True\n\n failed = ['FAILED']\n if returncode != 0 or any(x in out for x in failed) or timeout:\n # If a benchmark fails, remove its corresponding wpr file, and act\n # as if it didn't exist\n # Remove from data/wpr_source\n\n print 'Benchmark {0}_pc failed'.format(i)\n print 'Out:'\n print out\n print 'Err:'\n print err\n urlName = 'url{0}_page_set_000.wpr'.format(i)\n urlpcName = 'url{0}_pc_page_set_000.wpr'.format(i)\n urlFilePath = os.path.join('data/wpr_source',urlName)\n urlpcFilePath = os.path.join('data/wpr_source',urlpcName)\n urlCmd = 'rm -f {0}'.format(urlFilePath)\n urlpcCmd = 'rm -f {0}'.format(urlpcFilePath)\n print 'Removing: {0}, {1}'.format(urlFilePath, urlpcFilePath)\n commands = [\n 'rm -f {0}'.format(urlFilePath),\n 'rm -f {0}'.format(urlpcFilePath)\n ]\n for cmdss in commands:\n p = Popen(cmdss, shell=True)\n p.wait()\n # Skip the rest of this url\n print \"Moving on!\"\n continue\n\n # Parse data\n tmp_path = 'temp/tmp_benchmark_result_json'\n with open(tmp_path, 'rb') as f:\n tmp_json = json.load(f)\n benchmark_results = tmp_json['values']\n\n commands = [\n 'rm -f ~/page_load_time/telemetry/temp/tmp_benchmark_result_json',\n ]\n for cmds in commands:\n p = Popen(cmds, shell=True)\n p.wait()\n\n output = {urls[i]: {'cold_times': {trial_key: benchmark_results}}}\n output_file = os.path.join(output_path, urlsafe_b64encode(urls[i]))\n output_file += '.' 
+ str(trial_number) + '.pc'\n try:\n with open(output_file, 'w') as f:\n json.dump(output, f)\n except IOError:\n raise IOError('Unable to write to {0}'.format(output_file))", "def runTimingTests(c, startNx, endNx, stepNx, displayResults = False):\n timesArray = []\n nxs = np.empty(shape=[0])\n iteration = 0\n\n for currNx in range(startNx, endNx, stepNx):\n nx = currNx\n nt = nx\n nxs = np.append(nxs, nx)\n _, timesSmooth, _, _ = main(nx, nt, c, displayResults = False)\n timesArray = np.append(timesArray, timesSmooth)\n iteration = iteration+1\n \n timesArray = timesArray.reshape(iteration, len(timesSmooth)) \n timesArray = np.matrix.transpose(timesArray)\n logNxs = np.log10(nxs)\n logTimes = np.log10(timesArray)\n methods = [\"FTBS\", \"CTCS\", \"CNCS\", \"LaxWendroff\"]\n if(display):\n for i in range (0, 4):\n plt.plot(logNxs, logTimes[i], label=methods[i])\n coeff = np.polyfit(logNxs,logTimes[i],1)\n print(\"Estimated order of magnitude time vs nx \"\\\n +methods[i]+\": \"+str(coeff[0]))\n plt.title(\"Log-log plot time of execution in s vs nx\\nc=\"+str(c))\n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n plt.show()", "def steps(self, step_count):\n self.dir.value(0 if step_count > 0 else 1)\n for i in range(abs(step_count)):\n self.stp.value(1)\n sleep_us(self.step_time)\n self.stp.value(0)\n sleep_us(self.step_time)\n self.current_position += step_count", "def run(self):\n self.speed_test.start()", "def main(benchmark, size=None, backend=None, repetitions=None, burnin=1, device=\"cpu\"):\n try:\n bm_module, bm_identifier = get_benchmark_module(benchmark)\n except ImportError as e:\n click.echo(f\"Error while loading benchmark {benchmark}: {e!s}\", err=True)\n raise click.Abort()\n\n available_backends = set(bm_module.__implementations__)\n\n if len(backend) == 0:\n backend = available_backends.copy()\n else:\n backend = set(backend)\n\n unsupported_backends = [b for b in backend if b not in available_backends]\n\n for b in unsupported_backends:\n click.echo(\n f'Backend \"{b}\" is not supported by chosen benchmark (skipping)', err=True\n )\n backend.remove(b)\n\n for b in backend.copy():\n try:\n with setup_functions[b](device=device) as bmod:\n click.echo(f\"Using {b} version {bmod.__version__}\")\n except BackendNotSupported as e:\n click.echo(\n f'Setup for backend \"{b}\" failed (skipping), reason: {e!s}', err=True\n )\n backend.remove(b)\n\n try:\n check_backend_conflicts(backend, device)\n except BackendConflict as exc:\n click.echo(f\"Backend conflict: {exc!s}\", err=True)\n raise click.Abort()\n\n runs = sorted(itertools.product(backend, size))\n\n if len(runs) == 0:\n click.echo(\"Nothing to do\")\n return\n\n timings = {run: [] for run in runs}\n\n if repetitions is None:\n click.echo(\"Estimating repetitions...\")\n repetitions = {}\n\n for b, s in runs:\n # use end-to-end runtime for repetition estimation\n def run_func():\n run = bm_module.get_callable(b, s, device=device)\n with setup_functions[b](device=device):\n run()\n\n repetitions[(b, s)] = estimate_repetitions(run_func)\n else:\n repetitions = {(b, s): repetitions for b, s in runs}\n\n all_runs = list(\n itertools.chain.from_iterable(\n [run] * (repetitions[run] + burnin) for run in runs\n )\n )\n random.shuffle(all_runs)\n\n results = {}\n checked = {r: False for r in runs}\n\n pbar = click.progressbar(\n label=f\"Running {len(all_runs)} benchmarks...\", length=len(runs)\n )\n\n try:\n with pbar:\n for (b, size) in all_runs:\n with setup_functions[b](device=device):\n run = 
bm_module.get_callable(b, size, device=device)\n with Timer() as t:\n res = run()\n\n # YOWO (you only warn once)\n if not checked[(b, size)]:\n if size in results:\n is_consistent = check_consistency(\n results[size], convert_to_numpy(res, b, device)\n )\n if not is_consistent:\n click.echo(\n f\"\\nWarning: inconsistent results for size {size}\",\n err=True,\n )\n else:\n results[size] = convert_to_numpy(res, b, device)\n checked[(b, size)] = True\n\n timings[(b, size)].append(t.elapsed)\n pbar.update(1.0 / (repetitions[(b, size)] + burnin))\n\n # push pbar to 100%\n pbar.update(1.0)\n\n for run in runs:\n assert len(timings[run]) == repetitions[run] + burnin\n\n finally:\n stats = compute_statistics(timings)\n click.echo(format_output(stats, bm_identifier, device=device))", "def run(self):\n last = self.system.last_timestep\n start = last.timestep + 1 if last else 0\n del last\n end = self.system.cg_steps\n \n logging.info(\"running timesteps {} to {}\".format(start, end))\n \n for _ in range(start, end):\n self.system.begin_timestep()\n self.atomistic_step()\n self.cg_step()\n self.system.end_timestep()\n \n logging.info(\"completed all {} timesteps\".format(end-start))", "def test_after_jam_step_two(self):\n for test_suite_class in self.jam_step_2_test_suite_list:\n test_suite = test_suite_class(self)\n results = test_suite.run()\n self.test_results += results", "def main():\n parser = optparse.OptionParser()\n parser.add_option('--debug', action='store_true', default=False,\n help='run in debug mode')\n parser.add_option('-i', '--iteration', type=int, default=DEFAULT_ITERATION,\n metavar='NUM',\n help='set the number of iterations for each test (defualt:%d)' % \\\n DEFAULT_ITERATION)\n parser.add_option('-f', '--fstypes', default='ext2,ext3,ext4,btrfs,xfs',\n type='string', metavar='TYPES', help='set the file systems to test')\n parser.add_option('-n', '--num', default=10000, type=int, metavar='NUM',\n help='set the number of file created')\n parser.add_option('-N', '--numa', action='store_true', default=False,\n help='run NUMA test')\n parser.add_option('-S', '--scalability', action='store_true', default=False,\n help='run scalability test')\n global options\n options, args = parser.parse_args()\n\n benchutils.check_root_or_die()\n suffix = ''\n if options.numa:\n suffix = 'numa'\n else:\n suffix = 'scale'\n output_dir = benchutils.get_output_directory(suffix=suffix, timestamp=True)\n fstypes = options.fstypes.split(',')\n for fs in fstypes:\n if options.numa:\n run_tests(output_dir, fs)\n elif options.scalability:\n run_scalability_tests(output_dir, fs)", "def run_scrapping():\n date = datetime.now().strftime(\"%Y-%m-%d\")\n size = 100\n r = list(range(size))\n random.shuffle(r)\n for i in r:\n scrap_page(url_page.format(i), date)\n print(str(i) + \" / \" + str(size))", "def run_step(self, milliseconds):\n stopDistance = self.params['safeDistance']\n\n timeStep = timedelta(milliseconds=milliseconds)\n newTime = self.time + timeStep # Time after step is performed.\n\n for light in self._lights:\n if newTime > light.getNextSwitchTime():\n light.switch(newTime)\n\n toRemove = [ ]\n for car in self._cars:\n if car.state != Car.DELETED:\n car.prepareMove(timeStep)\n else:\n toRemove.append(car)\n\n for car in toRemove: self._cars.remove(car)\n for car in self._cars: car.finishMove()\n\n # Generate new car.\n # It is always added to the queue and if there is enough place then\n # it will be instantly added to the road.\n carsToAdd, newLastCarTime = self.howManyCarsToAdd(newTime)\n 
self.addCars(carsToAdd)\n self._lastCarGenerationTime = newLastCarTime\n\n self.addCarsFromQueueToRoad()\n\n # Update time.\n self.time = newTime", "def steps_to_run(current_step, steps_per_epoch, steps_per_loop):\n if steps_per_loop <= 0:\n raise ValueError('steps_per_loop should be positive integer.')\n if steps_per_loop == 1:\n return steps_per_loop\n remainder_in_epoch = current_step % steps_per_epoch\n if remainder_in_epoch != 0:\n return min(steps_per_epoch - remainder_in_epoch, steps_per_loop)\n else:\n return steps_per_loop", "def single_run(steps_number):\n values = list()\n numerator = 0\n for i in trange(1, steps_number):\n\n numerator += generate_episode()\n\n values.append(numerator / i)\n\n return np.array(values)", "def test(self, n_test_runs: int = 10) -> None:\n steps: np.ndarray = np.zeros(n_test_runs)\n rewards: np.ndarray = np.zeros(n_test_runs)\n for t in range(n_test_runs):\n steps[t], rewards[t] = self.step(collect=False)\n\n self.get_logger().warn('---------- TEST RUN RESULTS ----------')\n self.get_logger().warn(f'Average: {steps.mean()}')\n self.get_logger().warn(f'STD: {steps.std()}')\n self.get_logger().warn(f'Median: {np.median(steps)}')\n self.get_logger().warn(f'Average Reward: {rewards.mean()}')", "def WarpStep(iters=5):\n MSG(\"WarpStep\")\n for j in range(iters):\n warp.step()\n return", "def execute_and_get_timesteps(self, num_timesteps, max_timesteps_per_episode=0, deterministic=False):\n pass", "def run(self, n=1, speed=1.0, rnd=0, filename=None, start_frame=0, verbose=True, crop=None):\n if verbose and filename:\n print 'rendering %s frames as %s ... %s' % (n, (filename % start_frame), (filename % (start_frame + n - 1)))\n for k in xrange(n):\n self.z += rnd * rand(*self.z.shape)\n self.step(speed=speed)\n if filename:\n out = self.rgb_image()\n if crop:\n out = out[crop[0]:crop[1],crop[2]:crop[3],...]\n imsave(filename % (k + start_frame), out)\n if verbose:\n print n - k,\n sys.stdout.flush()", "def run(self, n_sweeps, therm_factor=0.1, sweep_factor=1, n_flips=None):\n if self.samples_file:\n f = open(self.samples_file, 'w')\n\n if n_flips:\n n_flips = self.hamiltonian.min_flips()\n if n_flips != 1 and n_flips != 2:\n raise ValueError('Invalid number of spin flips')\n if not (0 <= therm_factor <= 1):\n raise ValueError('The thermalization factor should be a real '\n 'number between 0 and 1')\n if n_sweeps < 50:\n raise ValueError('Too few steps in MC. 
Please use at least 50')\n\n print('Starting MC Sampling')\n print('Will perform {} steps'.format(n_sweeps))\n\n self.nqs.init_lookup_tables(self.current_state)\n self.reset_sampler_values()\n\n if therm_factor != 0:\n print('Starting Thermalization')\n\n n_moves = int(therm_factor * n_sweeps) * \\\n int(sweep_factor * self.n_visible)\n for _ in range(n_moves):\n self.move(n_flips)\n\n print('Completed Thermalization')\n\n self.reset_sampler_values()\n\n print('Starting Monte Carlo Sampling')\n\n for i in range(int(n_sweeps)):\n for _ in range(int(sweep_factor * self.n_visible)):\n self.move(n_flips)\n self.current_Hloc = self.local_energy()\n self.state_history.append(np.array(self.current_state))\n self.local_energies.append(self.current_Hloc)\n if self.samples_file:\n self.write_current_state(f)\n\n print('Completed Monte Carlo Sampling')\n\n if self.samples_file:\n f.close()\n\n return self.estimate_wf_energy()", "def execute(self):\n print_verbose_messages = (self.verbose\n and self.device.communicator.rank == 0)\n\n # Ensure that all ops are attached (needed for is_tuning_complete).\n self.run(0)\n\n if print_verbose_messages:\n print(f'Running {type(self).__name__} benchmark')\n\n if print_verbose_messages:\n print(f'.. warming up for {self.warmup_steps} steps')\n self.run(self.warmup_steps)\n\n if (isinstance(self.device, hoomd.device.GPU)\n and hasattr(self.sim.operations, 'is_tuning_complete')):\n while not self.sim.operations.is_tuning_complete:\n if print_verbose_messages:\n print('.. autotuning GPU kernel parameters for '\n f'{self.warmup_steps} steps')\n self.run(self.warmup_steps)\n\n if print_verbose_messages:\n print(f'.. running for {self.benchmark_steps} steps '\n f'{self.repeat} time(s)')\n\n # benchmark\n performance = []\n\n if isinstance(self.device, hoomd.device.GPU):\n with self.device.enable_profiling():\n for i in range(self.repeat):\n self.run(self.benchmark_steps)\n performance.append(self.get_performance())\n if print_verbose_messages:\n print(f'.. {performance[-1]} {self.units}')\n else:\n for i in range(self.repeat):\n self.run(self.benchmark_steps)\n performance.append(self.get_performance())\n if print_verbose_messages:\n print(f'.. 
{performance[-1]} {self.units}')\n\n return performance", "def run_tests(self):\n with self.report.timer.record(\"run\"):\n self.result.report.extend(self._run_tests())", "def test_benchmark(self):\n\n proc = subprocess.Popen([\n sys.executable,\n benchmark.__file__,\n self.live_server_ws_url,\n ])\n for _ in range(0, 90, 5):\n time.sleep(5)\n if proc.returncode:\n break\n else:\n proc.terminate()\n proc.wait()\n assert proc.returncode == 0", "def RunSuite(config, files, extra_flags, errors):\n global ERRORS, CONCURRENCY\n Banner('running %d tests' % (len(files)))\n pool = multiprocessing.Pool(processes=CONCURRENCY)\n # create a list of run arguments to map over\n argslist = [(num, len(files), config, test, extra_flags)\n for num, test in enumerate(files)]\n # let the process pool handle the test assignments, order doesn't matter\n pool.map(RunTest, argslist)\n while not ERRORS.empty():\n phase, test = ERRORS.get()\n errors[phase].append(test)", "def time(n):\n steps = 3 + math.ceil(n/5.0)*2\n return steps", "def step(self, n, dlist):\n pass", "def step(self, steps):\n self._simulate(endStep=self.currentStep+steps)", "def set_number_of_time_steps(self, number_of_time_steps):\n self.number_of_time_steps = number_of_time_steps", "def warmup_step(ckpt_step: int) -> float:\n return ckpt_step * 10", "def run(address, nsteps, speed1=6, speed2=6):\n m1i = 0\n m2i = 0\n for i in range(nsteps):\n m1i = _speed_action(i, speed1, m1i)\n m2i = _speed_action(i, speed2, m2i)\n \n byte_to_send = MOTORS[m1i][m2i] \n try:\n i2c.write(address, byte_to_send)\n except:\n display.show(Image.SURPRISED)\n sleep(1000)\n break\n sleep(5)", "def _step(self):\n title()\n self.runCount = 1\n self.experiment.pause = False\n self._runExperiment()\n self.pause = True", "def test_multiple_games(self, iteration=10):\n # TODO: multithread?\n for i in range(iteration):\n self.test_one_game()", "def verilog_thread(name, step):\n run_command([\"./run_cadence.sh\", name, str(step), pdn, supply])", "def runSimulation(numSteps):\r\n\r\n # TO DO\r\n #pass\r\n rabbits = []\r\n foxes = []\r\n for i in range(numSteps):\r\n rabbitGrowth()\r\n foxGrowth()\r\n rabbits.append(CURRENTRABBITPOP)\r\n foxes.append(CURRENTFOXPOP)\r\n return rabbits, foxes", "def runSimulation(numSteps):\r\n\r\n # TO DO\r\n #pass\r\n rabbits = []\r\n foxes = []\r\n for i in range(numSteps):\r\n rabbitGrowth()\r\n foxGrowth()\r\n rabbits.append(CURRENTRABBITPOP)\r\n foxes.append(CURRENTFOXPOP)\r\n return rabbits, foxes", "def Run(benchmark_spec):\n _UpdateBenchmarkSpecWithFlags(benchmark_spec)\n vm = benchmark_spec.vms[0]\n\n if benchmark_spec.tpus:\n mnist_benchmark_script = 'mnist_tpu.py'\n mnist_benchmark_cmd = ('cd tpu/models && '\n 'export PYTHONPATH=$(pwd) && '\n 'cd official/mnist && '\n 'python {script} '\n '--data_dir={data_dir} '\n '--iterations={iterations} '\n '--model_dir={model_dir} '\n '--batch_size={batch_size}'.format(\n script=mnist_benchmark_script,\n data_dir=benchmark_spec.data_dir,\n iterations=benchmark_spec.iterations,\n model_dir=benchmark_spec.model_dir,\n batch_size=benchmark_spec.batch_size))\n else:\n mnist_benchmark_script = 'mnist.py'\n mnist_benchmark_cmd = ('cd models && '\n 'export PYTHONPATH=$(pwd) && '\n 'cd official/mnist && '\n 'python {script} '\n '--data_dir={data_dir} '\n '--model_dir={model_dir} '\n '--batch_size={batch_size} '.format(\n script=mnist_benchmark_script,\n data_dir=benchmark_spec.data_dir,\n model_dir=benchmark_spec.model_dir,\n batch_size=benchmark_spec.batch_size))\n\n if 
nvidia_driver.CheckNvidiaGpuExists(vm):\n mnist_benchmark_cmd = '{env} {cmd}'.format(\n env=tensorflow.GetEnvironmentVars(vm), cmd=mnist_benchmark_cmd)\n samples = []\n metadata = CreateMetadataDict(benchmark_spec)\n\n if benchmark_spec.train_steps > 0:\n if benchmark_spec.tpus:\n tpu = benchmark_spec.tpu_groups['train'].GetName()\n num_shards = '--num_shards={}'.format(\n benchmark_spec.tpu_groups['train'].GetNumShards())\n else:\n tpu = num_shards = ''\n\n if benchmark_spec.tpus:\n mnist_benchmark_train_cmd = (\n '{cmd} --tpu={tpu} --use_tpu={use_tpu} --train_steps={train_steps} '\n '{num_shards} --noenable_predict'.format(\n cmd=mnist_benchmark_cmd,\n tpu=tpu,\n use_tpu=bool(benchmark_spec.tpus),\n train_steps=benchmark_spec.train_steps,\n num_shards=num_shards))\n else:\n mnist_benchmark_train_cmd = (\n '{cmd} --train_epochs={train_epochs} '.format(\n cmd=mnist_benchmark_cmd,\n train_epochs=benchmark_spec.train_epochs))\n\n start = time.time()\n stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_train_cmd)\n elapsed_seconds = (time.time() - start)\n samples.extend(MakeSamplesFromTrainOutput(\n metadata, stdout + stderr, elapsed_seconds, benchmark_spec.train_steps))\n\n if benchmark_spec.eval_steps > 0:\n if benchmark_spec.tpus:\n mnist_benchmark_eval_cmd = (\n '{cmd} --tpu={tpu} --use_tpu={use_tpu} --eval_steps={eval_steps}'\n .format(\n cmd=mnist_benchmark_cmd,\n use_tpu=bool(benchmark_spec.tpus),\n tpu=benchmark_spec.tpu_groups['eval'].GetName(),\n eval_steps=benchmark_spec.eval_steps))\n else:\n mnist_benchmark_eval_cmd = ('{cmd} --eval_steps={eval_steps}'.format(\n cmd=mnist_benchmark_cmd, eval_steps=benchmark_spec.eval_steps))\n\n stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_eval_cmd)\n samples.extend(MakeSamplesFromEvalOutput(metadata, stdout + stderr,\n elapsed_seconds))\n return samples", "def threadsInBatches_run(l_threadAnalysis):\n index = 1\n if self.numThreads > total:\n self.numThreads = total\n threadFullLoops = int(total / self.numThreads)\n threadRem = total % self.numThreads\n alreadyRunCount = thread_batch(\n l_threadAnalysis,\n threadFullLoops,\n self.numThreads,\n 0)\n nextRunCount = thread_batch(\n l_threadAnalysis,\n 1,\n threadRem,\n alreadyRunCount)", "def process(self, args):\n for benchmark_file in args.benchmark_files:\n self.process_individual_file(benchmark_file)\n self.total_files += 1", "def train(self, steps):\r\n for e in range(steps):\r\n # do something...\r\n pass\r\n return self.get_value_function()", "def run_stage_loop(cls, _opts, tests_results, put_next_stage):\n for _, result in tests_results:\n put_next_stage(result)", "def run_benchmark(curl, benchmark, test_config = TestConfig()):\n\n warmup_runs = benchmark.warmup_runs\n benchmark_runs = benchmark.benchmark_runs\n message = '' #Message is name of benchmark... 
print it?\n\n if (warmup_runs <= 0):\n raise Exception(\"Invalid number of warmup runs, must be > 0 :\" + warmup_runs)\n if (benchmark_runs <= 0):\n raise Exception(\"Invalid number of benchmark runs, must be > 0 :\" + benchmark_runs)\n\n #Initialize variables to store output\n output = BenchmarkResult()\n output.name = benchmark.name\n output.group = benchmark.group\n metricnames = list(benchmark.metrics)\n metricvalues = [METRICS[name] for name in metricnames] # Metric variable for curl, to avoid hash lookup for every metric name\n results = [list() for x in xrange(0, len(metricnames))] # Initialize arrays to store results for each metric\n\n curl.setopt(pycurl.WRITEFUNCTION, lambda x: None) #Do not store actual response body at all.\n\n #Benchmark warm-up to allow for caching, JIT compiling, on client\n logging.info('Warmup: ' + message + ' started')\n for x in xrange(0, warmup_runs):\n if benchmark.method == u'POST' or benchmark.method == u'PUT':\n curl.setopt(curl.READFUNCTION, StringIO.StringIO(benchmark.body).read)\n curl.perform()\n logging.info('Warmup: ' + message + ' finished')\n\n logging.info('Benchmark: ' + message + ' starting')\n\n for x in xrange(0, benchmark_runs): # Run the actual benchmarks\n if benchmark.method == u'POST' or benchmark.method == u'PUT':\n curl.setopt(curl.READFUNCTION, StringIO.StringIO(benchmark.body).read)\n\n try: # Run the curl call, if it errors, then add to failure counts for benchmark\n curl.perform()\n except Exception:\n output.failures = output.failures + 1\n continue # Skip metrics collection\n\n # Get all metrics values for this run, and store to metric lists\n for i in xrange(0, len(metricnames)):\n results[i].append( curl.getinfo(metricvalues[i]) )\n\n logging.info('Benchmark: ' + message + ' ending')\n\n temp_results = dict()\n for i in xrange(0, len(metricnames)):\n temp_results[metricnames[i]] = results[i]\n output.results = temp_results\n\n curl.close()\n return analyze_benchmark_results(output, benchmark)", "def batch_steps(num_examples, batch_size):\n steps = num_examples // batch_size\n if num_examples % batch_size > 0:\n steps += 1\n return steps", "def _run_benchmark(self, params):\n logging.info('Running benchmark [%s]', self._get_name())\n params = benchmark_cnn.setup(params)\n bench = benchmark_cnn.BenchmarkCNN(params)\n bench.print_info()\n stats = bench.run()\n extras = {}\n extras['examples_per_sec'] = stats.get('images_per_sec')\n if 'last_average_loss' in stats:\n extras['last_average_loss'] = stats['last_average_loss']\n if 'top_1_accuracy' in stats:\n extras['top_1_accuracy'] = stats['top_1_accuracy']\n if 'top_5_accuracy' in stats:\n extras['top_5_accuracy'] = stats['top_5_accuracy']\n self.report_benchmark(\n iters=stats.get('num_steps'),\n wall_time=stats.get('average_wall_time'),\n extras=extras)", "def run_all_iterations(self):\n self.start_time = time.time()\n for _ in xrange(self.iterations):\n self.run_iteration()\n self.elapsed_time = time.time() - self.start_time\n\n self.print_statistics()", "def run(self, repetitions, **kwargs):\n\t\tself.sampler.sample(repetitions, **kwargs)", "def test_number_of_steps(self):\n class Mock(object):\n def __init__(self):\n self.count = 0\n\n def evolve(self, t, dt):\n self.count += 1\n\n G = Mock()\n sim = simulation.Simulation(G, dt=0.1)\n\n sim.run(100.0)\n self.assertEqual(G.count, 1000)\n\n G = Mock()\n sim = simulation.Simulation(G, dt=0.2)\n sim.run(100.2)\n self.assertEqual(G.count, 501)", "def test_numbers(number):\n print(\"\\nRunning test_numbers with 
{}\".format(number))", "def run_benchmark(self, test_config, instance, copy=0):\n # Timestamp and other values added for reporting\n result_dir = self.results_directory(test_config)\n test_config['timestamp'] = int(time.time())\n test_config['workspace'] = self.workspace\n cmd = self._cmd_builder(test_config)\n test_config['cmd'] = cmd\n total_batches = test_config['total_batches']\n\n test_home = os.path.join(self.bench_home, test_config['cmd_path'])\n\n # Write config to results folder\n config_file_out = os.path.join(result_dir, 'config.yaml')\n config_out = open(config_file_out, 'w')\n config_out.write(yaml.dump(test_config))\n config_out.close()\n\n # TODO(tobyboyd@): No longer distributed remove threads.\n worker_threads = []\n i = 0\n cmd = 'cd {}; {}'.format(test_home, cmd)\n print('[{}] worker | Run benchmark({}):{}'.format(\n copy, test_config['test_id'], cmd))\n stdout_file = os.path.join(result_dir, 'worker_%d_stdout.log' % i)\n stderr_file = os.path.join(result_dir, 'worker_%d_stderr.log' % i)\n t = instance.ExecuteCommandInThread(\n cmd, stdout_file, stderr_file, print_error=True)\n worker_threads.append(t)\n\n # Wait for log file to appear\n wait_time = 0\n while t.is_alive() and not os.path.isfile(stdout_file):\n print('Waiting for log file. Waited for {} seconds.'.format(wait_time))\n time.sleep(2)\n wait_time += 2\n\n # TODO(tobyboyd@) fix fragile check for batch to stop on.\n # Example: Epoch: [0][130/40037] Time 0.397\n batch_killer = '{}/'.format(total_batches)\n while t.is_alive():\n with open(stdout_file, 'r') as log:\n for line in log:\n if batch_killer in line:\n print('{} batches complete. Kill Thread.'.format(batch_killer))\n instance.kill_processes()\n break\n time.sleep(5)\n\n for t in worker_threads:\n t.join()\n\n return result_dir", "def run_benchmark():\n import argparse\n parser = argparse.ArgumentParser(description='Benchmark alchemically modified system against unmodified system.')\n parser.add_argument('--platform', dest='platform_name', action='store', default=None, help='platform name to benchmark (default: None)')\n options = parser.parse_args()\n\n from sams.tests import testsystems\n for testsystem_name in ['AblImatinibExplicitAlchemical']:\n cls = getattr(testsystems, testsystem_name)\n testsystem = cls()\n factory_args = { 'ligand_atoms' : testsystem.alchemical_atoms, 'receptor_atoms' : range(0,4266) }\n benchmark(testsystem.system, testsystem.positions, platform_name=options.platform_name, nsteps=5000, timestep=1.0*unit.femtoseconds, factory_args=factory_args)", "def train(self, num_episodes, max_episode_steps=100, save_freq=100, render=False):\n while self.episodes_done < num_episodes:\n self.trainOneEpisode(num_episodes, max_episode_steps, save_freq, render)\n self.saveCheckpoint()", "def test_worker_steps(self):\n target_thread_count = 3\n\n class StepCounter(object):\n \"\"\"\n Count the number of times a step is taken in the worker.\n \"\"\"\n def __init__(self):\n self.a_steps = 0\n self.b_steps = 0\n self.c_steps = 0\n\n def __call__(self, index, thread_count):\n self.a_steps += 1\n yield\n self.b_steps += 1\n yield\n self.c_steps += 1\n\n step_counter = StepCounter()\n\n rusher = Rusher(step_counter, target_thread_count)\n rusher.rush()\n self.assertEqual(step_counter.a_steps, target_thread_count)\n self.assertEqual(step_counter.b_steps, target_thread_count)\n self.assertEqual(step_counter.c_steps, 0)", "def run_benchmark(env: Env, in_file):\n\n print('Running benchmarks in', in_file.name)\n # Run file_path through mlir_to_bef and 
bef_executor and extract the\n # benchmark result.\n return env.run_mlir(in_file.read())", "def sciml_bench_run(smlb_in: RuntimeIn, smlb_out: RuntimeOut):\n # activate monitor\n # Note: To use smlb_out, you must activate it, passing the rank\n # information initialized by your distributed learning environment;\n # for a non-distributed benchmark, simply pass rank=0, local_rank=0\n # and activate_log_on_host(_device)=False; here we use True for\n # demonstration -- the log on host0 and device0 will be the same as\n # that on console except for some small differences in time\n # measurements.\n smlb_out.activate(rank=0, local_rank=0, activate_log_on_host=True,\n activate_log_on_device=True, console_on_screen=True)\n\n # log top level process\n # Note: Calling begin(), ended() and message() on smlb_out.log means\n # calling these functions on console, host and device; nothing\n # happens when calling these functions on an unactivated logger.\n log = smlb_out.log\n log.begin('Running benchmark MNIST_tf_keras')\n\n # parse input arguments (only batch_size and epochs)\n # Note: Use try_get() to get a benchmark-specific argument safely from\n # smlb_in.bench_args (passed by users via -b).\n with log.subproc('Parsing input arguments'):\n # hyperparameters\n batch_size = smlb_in.bench_args.try_get('batch_size', default=64)\n epochs = smlb_in.bench_args.try_get('epochs', default=2)\n log.message(f'batch_size = {batch_size}')\n log.message(f'epochs = {epochs}')\n\n # create datasets\n with log.subproc('Creating datasets'):\n dataset_dir = smlb_in.dataset_dir\n train_set = create_dataset_mnist(dataset_dir / 'train.hdf5', batch_size)\n test_set = create_dataset_mnist(dataset_dir / 'test.hdf5', batch_size)\n log.message(f'Dataset directory: {dataset_dir}')\n\n # create model\n with log.subproc('Creating CNN model'):\n model = create_model_mnist()\n\n # train model\n log.begin('Training CNN model')\n # fit()\n with log.subproc('Running model.fit()'):\n # stamp model.fit in system monitor\n # Note: smlb_out.system will monitor system usage regularly; use\n # smlb_out.system.stamp_event() to stamp an event in the report\n smlb_out.system.stamp_event('model.fit')\n history = model.fit(train_set, epochs=epochs, batch_size=batch_size,\n validation_data=test_set, verbose=0,\n callbacks=[LogEpochCallback(smlb_out)])\n # save model\n with log.subproc('Saving model weights'):\n weights_file = smlb_in.output_dir / 'model_weights.h5'\n model.save(weights_file)\n log.message(f'Saved to: {weights_file}')\n # save history\n with log.subproc('Saving training history'):\n history_file = smlb_in.output_dir / 'training_history.yml'\n with open(history_file, 'w') as handle:\n yaml.dump(history.history, handle)\n log.message(f'Saved to: {history_file}')\n log.ended('Training CNN model')\n\n # predict\n with log.subproc('Making predictions on test set'):\n with h5py.File(dataset_dir / 'test.hdf5', 'r') as h5_file:\n # stamp model.predict in system monitor\n smlb_out.system.stamp_event('model.predict')\n pred = model.predict(np.expand_dims(h5_file['image'][:], -1) / 255)\n correct = np.sum(pred.argmax(axis=1) == h5_file['label'][:])\n log.message(f'{correct} correct predictions for {len(pred)} images '\n f'(accuracy: {correct / len(pred) * 100:.2f}%)')\n\n # end top level\n log.ended('Running benchmark MNIST_tf_keras')", "def run(self):\n\n while not self.__done:\n self.single_cycle()\n\n \"\"\"\n while not self.__done:\n self.step()\n self.debug()\n \"\"\"", "def execute_series(self):\n for n in 
xrange(self.conf[\"n_runs\"]):\n self.runs[n].execute()", "def run_trials(f, n):\n\tfor value in range(2, 3):\n\t\tprint(\"{:>3}:{:>5}\".format(value, f(n, value)))", "def run(self):\n\n if self.count < self.max_pages:\n self.engine_redis.set(self.crawl_id + \"_count\", self.count)\n self.count = self.count + self.speed\n else:\n self.engine_redis.set(self.crawl_id + \"_count\", -2)\n\n reactor.callLater(5, self.run)", "def simulate(self, num_games):\r\n # self.runs = num_games #Initializes a tracker for the number of runs\r\n for _ in range(num_games):\r\n self.results.append(self._simulate_once())\r\n return self.results", "def run(method, n):\n \n m1,m2 = generate(n)\n \n start = time.time()\n method(m1,m2)\n end = time.time()\n \n exe = end - start\n \n return exe", "def every_n_iters(self, runner: Runner, n: int):\n if runner.iter < self.start_iter:\n return True\n return (runner.iter + 1 - self.start_iter) % n == 0 if n > 0 else False", "def experiment(agent, steps, runs, initialize=None):\n result = 0\n for r in range(runs):\n result += simulate(agent, steps, initialize)\n return result / runs", "def test_time(cmd, samples=16, warmup=4):\n # do testing\n print()\n avg_time = 0\n for s in range(samples + warmup):\n # report progress\n progress = s / (samples + warmup)\n print(CSI_UP + CSI_CLEARLN + \"Testing [{}%]\".format(floor(progress * 100)))\n\n output = shell(cmd) # run command\n tables = csv_mt.read_string(output, parse_float=True) # parse its output\n time = tables[\"statistics\"][\"time_us\"][0] # get its timing data\n\n # skip a few runs to let the system \"warm up\"\n if s >= warmup:\n avg_time += time / samples # compute average execution time\n\n # log the average time for this test case\n return avg_time", "def run_tests(output_dir, fstype):\n global options\n if options.debug:\n print \"Run NUMA test\"\n for num_disks in [2]:\n for num_dirs in range(1, 5):\n postmark = PostMarkTest(output_dir, fstype, num_disks, num_dirs)\n run_one_test(postmark)", "def test_run_sim():\n rnd = rand.Arrivals(31, 40)\n sim.run_sim(2, 1, 3, 4, 24, rnd)", "def run_timesteps(self, nsteps=1):\n if not self.initialized:\n raise RuntimeError(\"OversetSimulation has not been initialized\")\n\n wclabels = \"Pre Conn Solve Post\".split()\n tstart = self.last_timestep + 1\n tend = self.last_timestep + 1 + nsteps\n self.printer.echo(\"Running %d timesteps starting from %d\"%(nsteps, tstart))\n for nt in range(tstart, tend):\n with self.timer(\"Pre\", incremental=True):\n for ss in self.solvers:\n ss.pre_advance_stage1()\n\n with self.timer(\"Conn\", incremental=True):\n if self._do_connectivity(nt):\n self.perform_overset_connectivity()\n\n with self.timer(\"Pre\", incremental=False):\n for ss in self.solvers:\n ss.pre_advance_stage2()\n\n with self.timer(\"Conn\"):\n self.exchange_solution()\n\n with self.timer(\"Solve\"):\n for ss in self.solvers:\n ss.advance_timestep()\n\n with self.timer(\"Post\"):\n for ss in self.solvers:\n ss.post_advance()\n\n self.comm.Barrier()\n wctime = self.timer.get_timings(wclabels)\n wctime_str = ' '.join(\"%s: %.4f\"%(k, v) for k, v in wctime.items())\n self.printer.echo(\"WCTime:\", \"%5d\"%nt, wctime_str, \"Total:\",\n \"%.4f\"%sum(wctime.values()))\n self.last_timestep = tend", "def next ( num = 1 ) :\n return run ( num )", "def benchmark(trials:int):\n def benchmark_method(function:Callable[[int],int]) -> Callable[[int],Tuple[float,str]]:\n def time_wrapper(*args) -> Tuple[float,str]:\n \"\"\" Return the time taken to run a fibonacci method in microseconds 
\"\"\"\n t1 = time.time()\n for _ in range(trials):\n function(*args)\n return ((time.time()-t1)/trials) * 1e6, function.__name__\n return time_wrapper\n return benchmark_method", "def run_simulations(self,i_iteration,n_samples=None,filename=None):\n\n assert type(i_iteration) is int\n assert type(n_samples) in [type(None),int]\n assert type(filename) in [type(None),str]\n\n\n # define some convenience local variables for readability\n i = i_iteration\n if n_samples is not None:\n _n_samples = self.configuration.sampling_type[i]['n_samples']\n else:\n _n_samples = n_samples\n\n _sampling_type = self.configuration.sampling_type[i]['type']\n if filename is not None:\n _filename = self.configuration.sampling_type[i][n_samples]\n else:\n pass", "def _run():\n subprocess.check_call(\n [\n \"tools/bazel\",\n \"build\",\n \"-c\",\n \"opt\",\n \"test/core/memory_usage/memory_usage_test\",\n ]\n )\n ret = {}\n for name, benchmark_args in _BENCHMARKS.items():\n for scenario, extra_args in _SCENARIOS.items():\n # TODO(chenancy) Remove when minstack is implemented for channel\n if name == \"channel\" and scenario == \"minstack\":\n continue\n try:\n output = subprocess.check_output(\n [\n \"bazel-bin/test/core/memory_usage/memory_usage_test\",\n ]\n + benchmark_args\n + extra_args\n )\n except subprocess.CalledProcessError as e:\n print(\"Error running benchmark:\", e)\n continue\n for line in output.splitlines():\n for key, (pattern, conversion) in _INTERESTING.items():\n m = re.match(pattern, line)\n if m:\n ret[scenario + \": \" + key] = conversion(m.group(1))\n return ret", "def start(self, total: int, name: str = None):\n\n # Clean the run\n self.test_run = RunElements()\n\n if name is not None:\n self.test_run.name = name\n\n self.test_run.total = total\n\n # Init the start run date\n from datetime import datetime\n self.test_run.date = datetime.now().strftime(\"%d-%m-%Y (%H:%M)\")\n\n self.__send_all()", "def run(self, r, niters=10000):\n validator.validate_type(r, rng, param_name='r')\n validator.validate_positive(niters, param_name='niters')\n for _ in xrange(niters):\n # This goes against every object-oriented bone in my body, but the interface must be satisfied\n # And actually Python won't even let me do this because I'm accessing a method in a C++ class...\n # I'd have to write this whole thing in Cython or change the state interface to expose all these\n # functions separately...which might actually be worth doing.\n self._latent._thisptr.get()[0].sample_aux()\n self._latent._thisptr.get()[0].sample_state()\n self._latent._thisptr.get()[0].clear_empty_states()\n self._latent._thisptr.get()[0].sample_hypers(20)\n self._latent._thisptr.get()[0].sample_pi()\n self._latent._thisptr.get()[0].sample_phi()", "def test(numTrials):\n # Your Code Here\n hits = 0.0\n for i in range(numTrials):\n result = trial()\n #print result\n hits += result\n return hits / numTrials", "def number_of_iterations(self) -> int:\n pass", "def run(self):\r\n try:\r\n self.loader.find_and_load_step_definitions()\r\n except StepLoadingError, e:\r\n print \"Error loading step definitions:\\n\", e\r\n return\r\n\r\n results = []\r\n if self.explicit_features:\r\n features_files = self.explicit_features\r\n else:\r\n features_files = self.loader.find_feature_files()\r\n if self.random:\r\n random.shuffle(features_files)\r\n\r\n if not features_files:\r\n self.output.print_no_features_found(self.loader.base_dir)\r\n return\r\n\r\n processes = Pool(processes=self.parallelization)\r\n test_results_it = 
processes.imap_unordered(\r\n worker_process, [(self, filename) for filename in features_files]\r\n )\r\n \r\n all_total = ParallelTotalResult()\r\n for result in test_results_it:\r\n all_total += result['total']\r\n sys.stdout.write(result['stdout'])\r\n sys.stderr.write(result['stderr'])\r\n\r\n return all_total", "def run_commands(command, number_to_run, temp_file):\n global g_max_runtime_secs\n global g_finished_this_unit_test\n\n temp_string = command.split()\n testname = temp_string[-1]\n temp_string = testname.split('/')\n\n full_command = command + ' > ' + temp_file\n g_finished_this_unit_test = False\n\n for run_index in range(0, number_to_run):\n\n if g_finished_this_unit_test:\n break\n\n child = subprocess.Popen(full_command, shell=True)\n\n while child.poll() is None:\n time.sleep(20)\n# subprocess.call(full_command, shell=True) # run the command,\n\n with open(temp_file, 'r') as thefile: # go into tempfile and grab test run info\n for each_line in thefile:\n\n temp_string = each_line.split()\n if len(temp_string) > 0:\n if temp_string[0] == 'PASS':\n test_time = temp_string[2]\n try:\n runtime = float(test_time[:-1])\n\n print(\"Unit test run time is {0}\".format(runtime))\n if runtime > g_max_runtime_secs:\n g_finished_this_unit_test = True\n\n except:\n print(\"Cannot convert run time. It is {0}\\n\".format(runtime))\n break" ]
[ "0.6976785", "0.6855227", "0.6855227", "0.6431256", "0.631405", "0.6156152", "0.60947746", "0.60345435", "0.5957139", "0.5937996", "0.59057015", "0.5897424", "0.58912295", "0.5865059", "0.582705", "0.58178043", "0.58033776", "0.5785402", "0.57739943", "0.5768402", "0.573326", "0.5715207", "0.570551", "0.5673789", "0.5658428", "0.56475663", "0.56285614", "0.5624873", "0.5624688", "0.56215197", "0.553555", "0.5533949", "0.5515726", "0.550837", "0.54849046", "0.5477575", "0.5477173", "0.5475991", "0.5474632", "0.5471931", "0.5466444", "0.546045", "0.544559", "0.5431427", "0.5423939", "0.54091823", "0.54000455", "0.5399022", "0.5392259", "0.53853726", "0.5378646", "0.5377754", "0.53761244", "0.5363371", "0.53457624", "0.5345229", "0.5333214", "0.532698", "0.5314469", "0.5314469", "0.53111607", "0.5311042", "0.5309969", "0.5295366", "0.52901036", "0.5289517", "0.52890277", "0.52858907", "0.5273705", "0.52735066", "0.5265327", "0.52469313", "0.524639", "0.52455074", "0.52401537", "0.52387434", "0.5236543", "0.5222798", "0.5214499", "0.52099746", "0.5209922", "0.520992", "0.52010083", "0.51964635", "0.5192428", "0.5181974", "0.5170407", "0.51687604", "0.5160273", "0.5158605", "0.515702", "0.5156586", "0.51466614", "0.51434207", "0.51424533", "0.5140828", "0.51312554", "0.51311725", "0.5124137", "0.5120493" ]
0.58645546
14
Override this method to initialize the simulations.
def make_simulations(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialise_sim(self):\n pass", "def initialize_simulation(self) -> Simulation:\n pass", "def _setup_simulation(self\n ) -> None:\n pass", "def __init__(self, simulator):\r\n self.initialize(simulator)", "def make_simulation(self):\n pass", "def experiment_init(self):\n pass", "def setup_simulation(self, **kwargs):\n\n self.distance = self.config[\"site\"][\"distance\"]\n self.num_substations = self.config[\"num_substations\"]\n\n self.initialize_substructure_production()\n self.initialize_installation_vessel()", "def __init__(self) -> None:\n self.simulation = None\n self.update_time = 0.1\n self.time = None\n self.config = None", "def __init__(self):\n self.setupLogger()\n self.revConnect()\n #rev connect must happen FIRST\n super(APISTUB, self).__init__()\n\n self.globalData = CGlobalVariables()\n \n self.iSimulatorCount = 10\n self.lstSimulators = []\n \n for i in xrange(self.iSimulatorCount):\n simulator = SIMULATER(i, self.globalData)\n self.lstSimulators.append(simulator)", "def experiment_init(self):\n raise NotImplementedError(\"this needs to be implemented!\")", "def initialize(self):\n self._setup_simulation_from_parameters()\n if \"orrb\" in self.constants.observation_providers:\n self._reset()\n self._goal = self._next_goal()\n self.update_goal_info()\n\n self.observer = self._build_observer()", "def make_sims(self):\n self.sims = [Simulation(conf=c) for c in self.sim_confs]", "def __init__(self):\n self.time_limit = None\n self.time_step = None\n self.robot = None\n self.humans = None\n self.global_time = None\n self.human_times = None\n # reward function\n self.discomfort_dist = None\n # simulation configuration\n self.config = None\n self.randomize_attributes = None\n self.square_width = None\n self.circle_radius = None\n self.human_num = None\n # for visualization\n self.states = None\n self.states_traj = None", "def initialize(self):\r\n N = self.N\r\n self.mean = array(self.x0, copy=True)\r\n self.sigma = self.sigma0\r\n self.sigmai = np.ones(N)\r\n self.ps = np.zeros(N) # path for individual and globalstep-size(s)\r\n self.r = np.zeros(N)\r\n self.pr = 0 # cumulation for zr = N(0,1)\r\n self.sigma_r = 0", "def __init__(self, constraints, sys_config, benchmark, options, fitness_func = mock_eval_stats):\n\n super().__init__(constraints, benchmark, options)\n self.sims = [MockSim(sys_config)]\n self.stats = {}\n self.fitness = None\n self.fitness_func = fitness_func", "def __init__(self):\n print(\"Initializing system...\"),\n for i in range(0,self.numAtoms):\n self.atoms.append(Atom())\n self.assignPositions()\n self.applyBoltzmannDist()\n self.correctMomenta()\n print(\"done.\")\n print(\"Simulation is now running.\")", "def initialize(self):\n self.write_model(path=PATH.GRAD, suffix='new')\n\n if PAR.RANDOM_OVER_IT or optimize.iter == 1:\n self.get_random_frequencies()\n\n print('Generating synthetics')\n system.run('solver', 'eval_func',\n hosts='all',\n path=PATH.GRAD)\n\n self.write_misfit(path=PATH.GRAD, suffix='new')", "def setUpClass(cls):\n np.random.seed(2019)\n # So the 1 st row of the first random number array, random.rand(500, 3)\n # will be [0.90348221, 0.39308051, 0.62396996]\n # Accordingly, the first row of\n # coordinates = (0.5 - np.random.rand(500, 3)) * box_length\n # should be [-3.31690899, 0.87895379, -1.01912071]\n cls.sys_obj = monte_carlo.SystemSetup()\n cls.energy = energy.Energy()\n cls.parser = monte_carlo.initialize()\n cls.sim = monte_carlo.MonteCarlo(\n cls.sys_obj, cls.energy, cls.parser)\n np.random.seed()", "def 
experiments_init(self):\n pass", "def startSimulation(self):\n self.saveParameters()\n self.simulation.main()", "def setUpClass(cls):\n cls.sim1 = Simulation(logging_level=50)\n cls.sim1.set_simulation_parameters(\n seed=1, task=47, output_directory=\"output\", min_speciation_rate=0.01, sigma=2, deme=1, sample_size=0.01\n )\n cls.sim1.set_map_files(\n \"null\", fine_file=\"sample/SA_sample_fine.tif\", reproduction_map=\"sample/SA_sample_reproduction.tif\"\n )\n cls.sim1.run()\n cls.sim2 = Simulation(logging_level=50)\n cls.sim2.set_simulation_parameters(\n seed=1, task=48, output_directory=\"output\", min_speciation_rate=0.01, sigma=2, deme=1, sample_size=0.01\n )\n cls.sim2.set_map_files(\"null\", fine_file=\"sample/SA_sample_fine.tif\")\n cls.sim2.run()\n cls.sim3 = Simulation(logging_level=50)\n cls.sim3.set_simulation_parameters(\n seed=2, task=47, output_directory=\"output\", min_speciation_rate=0.01, sigma=2, deme=1, sample_size=0.01\n )\n cls.sim3.set_map_files(\n \"null\",\n fine_file=\"sample/SA_sample_fine.tif\",\n death_map=\"sample/SA_sample_reproduction.tif\",\n reproduction_map=\"sample/SA_sample_reproduction.tif\",\n )\n cls.sim3.run()\n cls.sim4 = Simulation(logging_level=50)\n cls.sim4.set_simulation_parameters(\n seed=4, task=47, output_directory=\"output\", min_speciation_rate=0.01, sigma=2, deme=1, sample_size=0.01\n )\n cls.sim4.set_map_files(\"null\", fine_file=\"sample/SA_sample_coarse_pristine.tif\")\n cls.sim4.add_reproduction_map(reproduction_map=\"sample/SA_reproduction_coarse.tif\")\n cls.sim4.add_death_map(death_map=\"sample/SA_death.tif\")\n cls.sim4.add_dispersal_map(dispersal_map=\"sample/dispersal_fine2.tif\")\n cls.sim4.run()\n cls.coal1 = CoalescenceTree(cls.sim1)\n cls.coal2 = CoalescenceTree(cls.sim2)\n cls.coal3 = CoalescenceTree(cls.sim3)\n cls.coal4 = CoalescenceTree(cls.sim4)", "def setUp(self):\n shape = RNG.integers(5, 50)\n periods = self.periods = RNG.normal() * 3\n freq = periods / shape\n amp = RNG.normal()\n offset = RNG.normal()\n phase = (RNG.normal() - 1 / 2) * 5 / 3 * np.pi\n p_gt = self.p_gt = (amp, freq, phase, offset)\n x = self.x = np.arange(shape)\n self.data = sine(x, *p_gt)", "def __init__(self):\n self._max_sim_time_reached = False\n self._max_wall_time_reached = False\n self._behavior_finished = False\n self._flexbe_status_subscriber = None\n\n self._mission_finalizers = \"\"\n self._mission_sim_time_in_sec = 0\n self._finalizer_functions = []\n\n self.read_ros_params()\n CiLog.info(\"Init of SimulationControl constructor finished.\")", "def prepare_simulations(self):\n # Parse all recording sites into a tuple containing the variable name,\n # segment name and component names\n for setup in self._simulation_setups:\n for i, rec in enumerate(setup.record_variables):\n if rec is None:\n # Records the voltage in the default segment by default\n var = 'v'\n segname = self.default_seg\n component = None\n else:\n parts = rec.split('.')\n if len(parts) == 1:\n var = parts[0]\n segname = self.default_seg\n component = None\n elif len(parts) == 2:\n segname, var = parts\n component = None\n else:\n segname, component, var = parts\n setup.record_variables[i] = (var, segname, component)\n # Check to see if there are multiple setups, because if there aren't\n # the cell can be initialised (they can't in general if there are\n # multiple as there is only ever one instance of NEURON running)\n if len(self._simulation_setups) == 1:\n self._prepare(self._simulation_setups[0])", "def initialize(self,t0=0.0):\n \n # An 
connection_distribution_list (store unique connection(defined by weight,syn,prob))\n self.connection_distribution_collection = ConnectionDistributionCollection() # this is \n self.t = t0\n \n # put all subpopulation and all connections into the same platform\n for subpop in self.population_list:\n subpop.simulation = self\n for connpair in self.connection_list:\n connpair.simulation = self\n \n \n \n # initialize population_list, calculate \n \n \n for p in self.population_list:\n p.initialize() # 2 \n \n for c in self.connection_list:\n print 'initialize population'\n c.initialize() # 1", "def initialize(self,settings = None):\n self.evaluated = 0\n self.evals = 0\n self.initializer.evaluate(self)", "def initialisation(self):\n self.create_variables()\n self.create_placeholders()\n self.build_model()\n self.reset_lr(None, True)\n self.build_loss()\n self.initialised = True", "def __init__(self,simulation_manager):\n self.simulation_manager = simulation_manager", "def initialize(self):\n\t\tmu = 0\n\t\tsigma = np.sqrt(2 / self.dataset[\"d\"])\n\n\t\tself.F1 = np.random.normal(mu, sigma, self.F1.shape)\n\t\tself.F2 = np.random.normal(mu, sigma, self.F2.shape)\n\t\tself.W = np.random.normal(mu, sigma, self.W.shape)\n\n\t\tself.F1_momentum = np.zeros(self.F1.shape)\n\t\tself.F2_momentum = np.zeros(self.F2.shape)\n\t\tself.W_momentum = np.zeros(self.W.shape)", "def __init__(self):\n\n self.read_input_file()\n self.read_simulation_files()", "def __init__(self, numInputs, numOutputs, randomize=False, bias=False):\n super().__init__(numInputs, numOutputs, randomize, bias)\n self.iterations = 0", "def setUpClass(self):\n self.c = Simulation(logging_level=50)\n self.c.set_simulation_parameters(\n seed=1,\n task=32,\n output_directory=\"output\",\n min_speciation_rate=0.5,\n sigma=2,\n tau=2,\n deme=1,\n sample_size=0.1,\n max_time=10,\n dispersal_relative_cost=1,\n min_num_species=1,\n )\n self.c.set_map_files(\n sample_file=\"sample/SA_samplemaskINT.tif\",\n fine_file=\"sample/SA_sample_coarse.tif\",\n dispersal_map=\"sample/dispersal_fine.tif\",\n )\n self.c.run()", "def initialize(self):\n self.initialize_edges()\n self.initialize_prob()\n self.initialize_total_input_dict()\n\n self.initialize_fpmusigv_dict()", "def __init__(self, **kwargs):\n\n args = {\n 'nobs': None, # Number of observations\n 'npred': None, # Number of predictors\n 'nrelpred': None, # Number of relevant predictors\n 'relpos': None, # Position of relevant predictor components\n 'gamma': None, # Decay factor of eigenvalue of predictor\n 'rsq': None, # Coefficient of determination\n 'sim_type': None, # Type of simulation: univariate, bivariate, multivariate\n }\n for key, value in args.items():\n setattr(self, key, value)\n\n for key, value in kwargs.items():\n setattr(self, key, value)", "def setUp(self):\n self.m = m = random.randint(1, 100)\n self.n = n = random.randint(1, 100)\n self.sig = sig = Signature(\"name\", Dim(\"m\"), Dim(\"n\"),\n sData(\"A\", \"ldA * n\"), Ld(\"ldA\", \"m\"),\n dData(\"B\", \"ldB * m\"), Ld(\"ldB\", \"m\"),\n cData(\"C\", \"ldC * n\"), Ld(\"ldC\", \"n\"))\n self.ex = ex = Experiment()\n ex.calls = [sig(m, n, \"X\", None, \"Y\", None, \"Z\", None)]\n ex.infer_lds()\n self.i = Symbol(\"i\")\n self.j = Symbol(\"j\")", "def _initializeMatrices(self):\n K = self.K \n # Initialize Initia\n rand_initial_prob = np.random.dirichlet(np.ones(K),size=1)\n rand_initial_prob = list(rand_initial_prob[0,:])\n for i in range(K): \n self.state_initial_prob[i+1] = rand_initial_prob[i]\n\n # Initialize the 
transition MAtrix !\n for i in range(K):\n rand_initial_prob = np.random.dirichlet(np.ones(K),size=1)\n rand_initial_prob = list(rand_initial_prob[0,:]) \n\n for j in range(K):\n self.state_transition_mat[(j+1,i+1)] = rand_initial_prob[j]\n\n\n # Initialize the symbol distribution Parameters ui and si (Assuming a numeric outputs ! Modelled using a gaussian ! withmean ui and std si)\n init_mean = np.mean(self.data_matrix)\n init_std = np.std(self.data_matrix) \n \n for i in range(K):\n random_mean = gauss(init_mean,30)\n random_std = gauss(init_std,30)\n self.state_symbol_prob[i+1] = {'mean':random_mean, 'std' : random_std}", "def __simSetup(self):\n self.__prime_ip = [(io[0], '$', io[2]) for io in self.listPrimeIos(True) if io[1] == 'i']\n\n # setting primary output values to None\n for prime_op in self.__prime_op:\n self.dGrph[prime_op][1] = None\n\n # setting cfg_blck output values to None\n blck_ids = [blck[0] for blck in self.listCfgBlcks()]\n for cfg_id in blck_ids:\n self.dGrph[cfg_id][1][1] = None\n \n # setting ari_blck output values to None\n blck_ids = [blck[0] for blck in self.listAriBlcks()]\n for ari_id in blck_ids:\n self.dGrph[ari_id][1][0][1] = None\n self.dGrph[ari_id][1][1][1] = None\n self.dGrph[ari_id][1][2][1] = None\n \n # setting tribuf output values to None\n blck_ids = [blck[0] for blck in self.listTribufs()]\n for tri_id in blck_ids:\n self.dGrph[tri_id][2][1] = None\n \n # setting gate output values to None\n blck_ids = [blck[0] for blck in self.listGates()]\n for gate_id in blck_ids:\n self.dGrph[gate_id][1][2] = None", "def setUp(self):\n self._m = 100\n self._n = 30\n self._k = 5\n self._increment = 20\n self._A = get_data(ExperimentType.ExampleNo2)(self._m, np.arange(2 * self._k).astype(float))\n self._approximation = random_id(self._A, self._k, self._increment)\n self._B = self._approximation.B\n self._P = np.array(self._approximation.P)\n self._A = self._A.as_numpy_arr()\n self._n = self._A.shape[1]\n self._approximation = self._approximation.as_numpy_arr()", "def Initialize(self):\n problem_data = self.project_parameters[\"problem_data\"]\n if problem_data.Has(\"start_time\"):\n warn_msg = 'Parameter TIME is used as load factor. \\n'\n warn_msg += 'Parameter \"start_time\" will be ignored!'\n KratosMultiphysics.Logger.PrintWarning(\"StructuralMechanicsPrebucklingAnalysis; Warning\", warn_msg)\n else:\n # Create dummy parameter\n aux_settings = KratosMultiphysics.Parameters(r\"\"\"{ \"start_time\" : 1.0 }\"\"\")\n problem_data.AddMissingParameters(aux_settings)\n\n if problem_data.Has(\"end_time\"):\n warn_msg = 'Parameter TIME is used as load factor. 
\\n'\n warn_msg += 'Parameter \"end_time\" will be ignored!'\n KratosMultiphysics.Logger.PrintWarning(\"StructuralMechanicsPrebucklingAnalysis; Warning\", warn_msg)\n else:\n # Create dummy paramter\n aux_settings = KratosMultiphysics.Parameters(r\"\"\"{ \"end_time\" : 1.0 }\"\"\")\n problem_data.AddMissingParameters(aux_settings)\n\n # Initialize super class\n super().Initialize()\n\n # Initialize solution stepping\n self.step = 0\n self.time = 1\n if not problem_data.Has(\"nsteps\"):\n raise Exception(\"StructuralMechanicsPrebucklingAnalysis: \" + 'Maximum number of steps \"nsteps\" must be provided\"!')\n else:\n self.nsteps = problem_data[\"nsteps\"].GetInt()\n\n ## If the echo level is high enough, print the complete list of settings used to run the simualtion\n if self.echo_level > 1:\n with open(\"ProjectParametersOutput.json\", 'w') as parameter_output_file:\n parameter_output_file.write(self.project_parameters.PrettyPrintJsonString())\n\n KratosMultiphysics.Logger.PrintInfo(self._GetSimulationName(), \"Analysis -START- \")", "def __init__(self):\n\n smach.State.__init__(self, \n outcomes=['GoToNormal','GoToSleep'])\n \n self.rate = rospy.Rate(200) # Loop at 50 Hz", "def __init__(self, data, simulator, \n prior_dict = {}, \n N = 1000, \n eps0 = 0.01, \n T = 20, \n Nthreads = 10):\n\n self.data = data\n self.N = N \n self.eps0 = eps0 \n self.T = T \n self.Nthreads = Nthreads \n\n # simulator function has to be a function of theta_star \n self.simz = simulator\n\n self.prior_param(param_dict = prior_dict) # first run prior parameters", "def __init__(self):\n \n smach.State.__init__(self, \n outcomes=['GoToNormal','GoToPlay'])\n \n self.rate = rospy.Rate(200) # Loop at 50 Hz", "def setUp(self):\n self.s = Simulation()\n self.s['Retina']=GeneratorSheet(nominal_density=4.0)\n self.s['V1']= CFSheet(nominal_density=4.0)\n self.s['V2'] = CFSheet(nominal_density=4.0)\n\n self.s.connect('Retina','V1',delay=0.5,connection_type=CFProjection,\n name='RtoV1',learning_fn=CFPLF_Hebbian())\n\n self.s.connect('Retina','V2',delay=0.5,connection_type=CFProjection,\n name='RtoV2',learning_fn=CFPLF_Hebbian())", "def __init__(self, simulation):\n\n self.sim = simulation # A starting simulation with default values\n\n self.master = Tk()\n self.master.title(\"Primedice Simulator\")\n\n self.balance_label, self.balance_input, self.balance_str = \\\n self.make_balance_input()\n self.base_bet_label, self.base_bet_input, self.base_bet_str = \\\n self.make_base_bet_input()\n self.payout_label, self.payout_input, self.payout_str = \\\n self.make_payout_input()\n\n self.iterations_label, self.iterations_input, self.iterations_str = \\\n self.make_iterations_input()\n self.loss_adder_label, self.loss_adder_input, self.loss_adder_str = \\\n self.make_loss_adder_input()\n\n self.run_button = self.make_run_button()\n\n self.progress_label, self.progress_bar = self.make_progress_bar()\n\n self.graph_fig = self.make_graph()\n self.sim_results = None # Placeholder for when results come in\n\n self.master.mainloop()", "def __init__(\n self, sequence: Sequence, config: EmulatorConfig = EmulatorConfig()\n ):\n super().__init__(sequence)\n if not isinstance(config, EmulatorConfig):\n raise TypeError(\n \"'config' must be of type 'EmulatorConfig', \"\n f\"not {type(config)}.\"\n )\n self._config = config\n self._sim_obj = QutipEmulator.from_sequence(\n sequence,\n sampling_rate=self._config.sampling_rate,\n config=SimConfig.from_noise_model(self._config.noise_model),\n evaluation_times=self._config.evaluation_times,\n 
with_modulation=self._config.with_modulation,\n )\n self._sim_obj.set_initial_state(self._config.initial_state)", "def setUpClass(cls):\n cls.c = Simulation(logging_level=logging.CRITICAL)\n cls.c.set_simulation_parameters(\n seed=2,\n task=32,\n output_directory=\"output\",\n min_speciation_rate=0.5,\n sigma=2,\n tau=2,\n deme=1,\n sample_size=0.1,\n max_time=10,\n dispersal_relative_cost=1,\n min_num_species=1,\n )\n cls.c.set_map_files(\n sample_file=\"sample/SA_samplemaskINT.tif\",\n fine_file=\"sample/SA_sample_coarse.tif\",\n dispersal_map=\"sample/dispersal_fine_cumulative.tif\",\n )\n cls.c.run()", "def initialize(self):\n self.population.initialize()\n self.cache.initialize()\n if self.storage:\n self.storage.initialize()", "def initialize(self):\n self.tree = ROOT.TTree('tree', 'tree')\n self.simhitcount = []\n self.simhitarrays = np.array(self.simhitcount, dtype=np.int32)\n self.digitcount = []", "def init_sims(self, replace=False):\n return self.docvecs.init_sims(replace=replace)", "def _set_runtimes(self):\n self._run_times =np.zeros(self.n_runs, dtype = np.float)", "def setUpClass(cls):\n cls.c = Simulation(logging_level=logging.CRITICAL)\n cls.c.set_simulation_parameters(\n seed=3,\n task=32,\n output_directory=\"output\",\n min_speciation_rate=0.5,\n sigma=2,\n tau=2,\n deme=1,\n sample_size=0.1,\n max_time=10,\n dispersal_relative_cost=1,\n min_num_species=1,\n )\n cls.c.set_map_files(\n sample_file=\"sample/SA_samplemaskINT.tif\",\n fine_file=\"sample/SA_sample_coarse.tif\",\n dispersal_map=\"sample/dispersal_fine_nodata.tif\",\n )\n cls.c.run()", "def init_population(self):\n pass", "def __init__(self, _connecteur):\n super(Simulateur, self).__init__()\n\n _log.Linfo(\"Init -- Simulateur\")\n\n self.S_connecteur = _connecteur\n self.S_resultats = []\n self.S_duree_simulation = 0", "def init_sim(self,n):\n self.beacon = beacon(ENABLE_BEACON_DELAY)\n self.data = data_utils(n)\n random.seed()\n\n if n < 3:\n print 'Number of receivers %i is less than three.' 
%n\n print 'Simulation controller will not run.'\n print 'Now exiting.'\n sys.exit()\n \n self.data.set_rx_number(n)\n\n\n\n tx_loc = test_coords.get_tx_coords()\n self.data.set_tx_location(tx_loc)\n # self.data.reset_rx_location()\n\n for i in range(n):\n rx_loc = alex_random.get_random_coord()\n if self.DEBUG:\n print \"\\n\\n\\n\\n\\n\\nstore location: \", rx_loc\n print '\\n\\n\\n\\n\\n\\n'\n self.data.set_rx_location(i,rx_loc)\n\n tof = self.geo_utils.time_of_flight(rx_loc,tx_loc)\n self.data.set_rx_time_delay(tof)\n\n id = i+1\n self.data.set_rx_team_id(id)\n\n if self.DEBUG:\n print 'tx_loc: ', tx_loc\n print 'rx_loc: ', rx_loc\n print 'time: ', repr(tof)\n print 'id: ', id", "def init_run(self):\n raise NotImplementedError", "def __init__(self, name=None, params=None, params_from_file=False, params_from_user=False):\n\n print(\"\")\n if name:\n self._name = name\n else:\n self._name = input(\"Simulation Name : \")\n\n print(\"Name : \"+str(self._name))\n\n self.plot_path = os.getcwd()+'/session/'+self._name+'_plots/'\n try:\n os.mkdir(self.plot_path)\n except (FileExistsError, FileNotFoundError):\n beep = lambda x: os.system(\"echo '\\a';sleep 0.5;\" * x)\n beep(1)\n print(\"WARNING : FOLDER PATH ALREADY EXISTS\")\n print(self.plot_path)\n print(\"WRITING OVER\")\n for fn in os.listdir(self.plot_path):\n os.remove(self.plot_path+fn)\n\n if params:\n self.params = params\n else:\n if params_from_file:\n self.params = load_input_pickle(params_from_file)\n elif params_from_user:\n self.params = get_user_params()\n else:\n #Define default params\n self.params = load_input_pickle('default')\n\n self.default_runs = [] # array of simulation runs with default parameters\n self.mod_runs = [] # array of tuples that contain 0) a list of simulation runs\n # and 1) a dictionary clarifying which parameter was given\n # which value for each run. 
(for convenience, can also\n # determine by comparing the simulation_run.params\n # directly\n\n\n print(\"Running Model with Default Parameters...\")\n self.run_default()\n print(\"\")", "def __init__(self):\n\n ## TODO: create the actual HMM\n\n self.hmm = [HmmStates('S', {'S': 0.8, 'R': 0.2}, {'H': 0.8, 'G': 0.2}),\n HmmStates('R', {'S': 0.4, 'R': 0.6}, {'H': 0.4, 'G': 0.6})] # dummy HMM for testing purpose\n\n # prior probabilities TODO: compute prior probabilities from HMM\n self.prior = {'S': 2/3, 'R': 1/3}", "def init_for_run(self, stimulus):\n # Initialize training traces\n max_steps = self.settings['maxSteps']\n self.vars['S_trace'] = torch.zeros((max_steps, self.nSym, 1))\n self.vars['prev_s'] = torch.tensor(\n [float('inf')]) * torch.ones((self.nSym, 1))\n self.vars['Harmony_trace'] = torch.zeros(max_steps)\n self.vars['speed_trace'] = torch.zeros(max_steps)\n self.vars['ema_trace'] = torch.zeros(max_steps)\n self.vars['lambda_trace'] = torch.zeros(max_steps)\n self.vars['temp_trace'] = torch.zeros(max_steps)\n self.vars['TP_trace'] = list()\n self.vars['TPnum_trace'] = torch.zeros(max_steps)\n self.vars['TP_h_trace'] = torch.zeros(max_steps)\n self.vars['TP_dist_trace'] = torch.zeros(max_steps)\n self.vars['maxHarmony'] = torch.empty(\n self.grammar.nF, self.grammar.nR, self.nStimuli, self.settings['epochs']) # check this size\n\n # Create the representations for the stimulus\n stimulus = fortran_reshape(\n stimulus, (torch.numel(stimulus), 1)).double()\n #stimulus = stimulus.reshape((torch.numel(stimulus), 1)).double()\n # TPR representation of the external input\n self.inpS = self.TP.matmul(stimulus)\n #self.inpC = self.TP.T.matmul(self.inpS)\n self.inpC = self.toConceptual(self.inpS)\n\n # ----------- ALTERNATIVE ---------------------\n # Let's try the matrix version : TPR = Fillers * Bind * Roles\n #self.inpS = self.F.matmul(stimulus).matmul(self.R)\n #self.inpC = self.inpS @ torch.pinverse(self.R)\n # Reshape to make it suitable for MM.\n #self.inpC = self.inpC.reshape((self.inpC.numel(), 1))\n #self.inpS = self.inpS.reshape((self.inpS.numel(), 1))\n # ----------- END ALTERNATIVE ---------------------\n\n self.inpS = self.inpS.double()\n self.inpC = self.inpC.double()\n\n # Initialize state\n self.initial_state = self.settings['initStateMean'] + torch.normal(0.5, .2,\n (self.grammar.nF, self.grammar.nR)) * self.settings[\"initStateStdev\"]\n self.initial_state = self.initial_state.double()\n self.state = self.toNeural(self.initial_state)\n self.stateC = self.toConceptual(self.state)\n\n # Set Lambda and Temperature\n self.vars['T'] = self.vars['TInit']\n self.vars['lambda'] = self.vars['lambdaInit']\n self.vars['step'] = 0", "def __init__(self, simtab, metadata):\n if not 'L_NU_X_BAR' in simtab.colnames:\n # table only contains NU_E, NU_E_BAR, and NU_X, so double up\n # the use of NU_X for NU_X_BAR.\n for val in ['L','E','ALPHA']:\n simtab[f'{val}_NU_X_BAR'] = simtab[f'{val}_NU_X']\n # Get grid of model times.\n time = simtab['TIME'] << u.s\n # Set up dictionary of luminosity, mean energy and shape parameter\n # alpha, keyed by neutrino flavor (NU_E, NU_X, NU_E_BAR, NU_X_BAR).\n self.luminosity = {}\n self.meanE = {}\n self.pinch = {}\n for f in Flavor:\n self.luminosity[f] = simtab[f'L_{f.name}'] << u.erg/u.s\n self.meanE[f] = simtab[f'E_{f.name}'] << u.MeV\n self.pinch[f] = simtab[f'ALPHA_{f.name}']\n super().__init__(time, metadata)", "def initialize(self, simulator):\r\n self.__eventHandlers = {}\r\n self.__simulator = simulator\r\n self.__threads = {}", "def 
setup_class(cls):\n\n # set the base directory\n cls.basedir = os.path.join(os.path.split(os.path.realpath(__file__))[0], \"base\")\n\n cls.ninj = 50 # number of simulated signals\n cls.maxamp = 5e-23 # maximum amplitude\n cls.freqrange = (10.0, 100.0) # frequency range\n\n # default prior dictionary\n cls.priors = {}\n cls.priors[\"h0\"] = bilby.core.prior.Uniform(\n name=\"h0\", minimum=0.0, maximum=1e-22\n )", "def __init__(self):\n Sampler.__init__(self)\n self._registeredIdentifiers = set() # tracks job identifiers used for this adaptive sampler and its inheritors\n self._prefixToIdentifiers = {} # tracks the mapping of run prefixes to particular identifiers\n self._inputIdentifiers = {} # identifiers for a single realization\n self._targetEvaluation = None # data object with feedback from sample realizations\n self._solutionExport = None # data object for solution printing\n self._requireSolnExport = False # if this object requires a solution export\n # NOTE TargetEvaluations consider all the Step <Output> DataObjects as candidates, so requiring\n # exactly one TargetEvaluation forces only having one <Output> DataObject in AdaptiveSampling\n # MultiRun Steps. For now, we leave it as \"n\".\n self.addAssemblerObject('TargetEvaluation', InputData.Quantity.one_to_infinity) # Place where realization evaluations go", "def setUpClass(cls):\n cls.c = Simulation(logging_level=logging.ERROR)\n cls.c.set_simulation_parameters(\n seed=1, task=39, output_directory=\"output\", min_speciation_rate=1, deme=100, spatial=False\n )\n cls.c.run()\n cls.c2 = Simulation(logging_level=logging.ERROR)\n cls.c2.set_simulation_parameters(\n seed=1, task=40, output_directory=\"output\", min_speciation_rate=0.5, deme=100, spatial=False\n )\n cls.c2.run()\n cls.c3 = Simulation(logging_level=logging.ERROR)\n cls.c3.set_simulation_parameters(\n seed=1,\n task=41,\n output_directory=\"output\",\n min_speciation_rate=0.5,\n deme=100,\n spatial=False,\n protracted=True,\n min_speciation_gen=0,\n max_speciation_gen=1,\n )\n cls.c3.run()\n cls.c4 = Simulation(logging_level=logging.ERROR)\n cls.c4.set_simulation_parameters(\n seed=1,\n task=42,\n output_directory=\"output\",\n min_speciation_rate=0.5,\n deme=100,\n spatial=False,\n protracted=True,\n min_speciation_gen=100,\n max_speciation_gen=1000,\n )\n cls.c4.run()", "def __init__(self):\n\n #call super class's __init__ method\n super(TRiseSampler, self).__init__(name=\"trise\", observed=False)", "def __init__(self, simulator, filename=None):\n\n data = {} if filename is None else JsonUtils.read_file(filename)\n if data == {}:\n self.logger.warning(\"The config is empty. 
You may have a problem with your config file.\")\n # Simulation parameters\n self.name = data[\"name\"] if \"name\" in data else \"\"\n self.sim_speed = data[\"sim_speed\"] if \"sim_speed\" in data else 1.0\n self.logger_name = data[\"logger_name\"] if \"logger_name\" in data else \"INFO\"\n self.logger = logging.Logger(self.logger_name)\n self.exit_condition = data[\"exit_condition\"] if \"exit_condition\" in data else \"self.body.config.n_iter > 500\"\n self.timeout = data[\"timeout\"] if \"timeout\" in data else 10\n self.simulator = simulator\n self.t_init = 0\n self.t_end = 0\n self.n_iter = 0\n\n # Physical parameters\n self.body = data[\"body\"] if \"body\" in data else dict()\n self.legs = data[\"legs\"] if \"legs\" in data else []\n self.brain = data[\"brain\"] if \"brain\" in data else dict()\n self.connection_matrix = data[\"connection_matrix\"] if \"connection_matrix\" in data else dict()\n if self.connection_matrix == dict():\n self.config_connection_matrix()\n self.dist_ref = data[\"dist_ref\"] if \"dist_ref\" in data else 20\n self.power_ref = data[\"dist_ref\"] if \"dist_ref\" in data else 1000", "def _initialise(self):\n assert (not self._running)\n\n # Parent generation population size\n # The parameter parent_pop_size is the mu in the papers. It represents\n # the size of a parent population used to update our paramters.\n self._parent_pop_size = self._population_size // 2\n\n # Weights, all set equal for the moment\n # Sum of all positive weights should be 1\n self._W = 1 + np.arange(self._population_size)\n self._W = np.log(0.5 * (self._population_size + 1)) - np.log(self._W)\n\n # Inverse of the sum of the first parent weights squared (variance\n # effective selection mass)\n self._muEff = (\n np.sum(self._W[:self._parent_pop_size]) ** 2\n / np.sum(np.square(self._W[:self._parent_pop_size]))\n )\n\n # Inverse of the Sum of the last weights squared (variance effective\n # selection mass)\n self._muEffMinus = (\n np.sum(self._W[self._parent_pop_size:]) ** 2\n / np.sum(np.square(self._W[self._parent_pop_size:]))\n )\n\n # cummulation, evolution paths, used to update Cov matrix and sigma)\n self._pc = np.zeros(self._n_parameters)\n self._psig = np.zeros(self._n_parameters)\n\n # learning rate for the mean\n self._cm = 1\n\n # Decay rate of the evolution path for C\n self._ccov = (4 + self._muEff / self._n_parameters) / (\n self._n_parameters + 4 + 2 * self._muEff / self._n_parameters)\n\n # Decay rate of the evolution path for sigma\n self._csig = (2 + self._muEff) / (self._n_parameters + 5 + self._muEff)\n\n # See rank-1 vs rank-mu updates\n # Learning rate for rank-1 update\n self._c1 = 2 / ((self._n_parameters + 1.3) ** 2 + self._muEff)\n\n # Learning rate for rank-mu update\n self._cmu = min(\n 2 * (self._muEff - 2 + 1 / self._muEff)\n / ((self._n_parameters + 2) ** 2 + self._muEff),\n 1 - self._c1\n )\n\n # Damping of the step-size (sigma0) update\n self._dsig = 1 + self._csig + 2 * max(\n 0, np.sqrt((self._muEff - 1) / (self._n_parameters + 1)) - 1)\n\n # Parameters from the Table 1 of [1]\n alpha_mu = 1 + self._c1 / self._cmu\n alpha_mueff = 1 + 2 * self._muEffMinus / (self._muEff + 2)\n alpha_pos_def = \\\n (1 - self._c1 - self._cmu) / (self._n_parameters * self._cmu)\n\n # Rescale the weights\n sum_pos = np.sum(self._W[self._W > 0])\n sum_neg = np.sum(self._W[self._W < 0])\n scale_pos = 1 / sum_pos\n scale_neg = min(alpha_mu, alpha_mueff, alpha_pos_def) / -sum_neg\n self._W[self._W > 0] *= scale_pos\n self._W[self._W < 0] *= scale_neg\n\n # Update optimiser 
state\n self._running = True", "def setUp(self):\n self.sampler = {\n \"name\": \"samplername\",\n \"backend_name\": \"\",\n \"backend_header\": \"\",\n \"backend_prefix\": \"\",\n \"backend_suffix\": \"\",\n \"backend_footer\": \"\",\n \"ncores\": 2,\n \"threads_per_core\": 1,\n \"omp_enabled\": True,\n \"papi_enabled\": True,\n \"papi_counters_max\": 2,\n \"papi_counters_avail\": (\"C1\", \"C2\", \"C3\"),\n \"kernels\": {\"dgemm\": (\n 'dgemm', 'char*', 'char*', 'int*', 'int*', 'int*', 'double*',\n 'double*', 'int*', 'double*', 'int*', 'double*', 'float*',\n 'int*'\n )},\n \"nt_max\": random.randint(1, 10),\n \"exe\": \"x\"\n }\n self.i = Symbol(\"i\")\n self.j = Symbol(\"j\")\n self.k = Symbol(\"k\")\n self.ns = [random.randint(1, 100) for _ in range(5)]", "def initialize(self) -> None:\n self.simulation = self.initialize_simulation()\n width, height = get_window_resolution()\n display_dim = ((0, width), (0, height))\n self.coord_mapper = CoordinateMapper2D(*self.simulation.dim, *display_dim)\n self.simple_pygame.all_sprites.empty()\n self.initialize_visualization()", "def initialise(self):\n # Can take quite a lot of time due to the homing\n print(\"Initialising spectrograph.\")\n err = self._dll.ShamrockInitialize()\n self.status(\"Initialisation\", err)", "def initialize(self):\n raise NotImplementedError", "def initialize(self):\n raise NotImplementedError", "def initialize(self):\n raise NotImplementedError", "def initialize(self):\n self.initilize_multiply_array() # m\n self.initialize_cameras()\n self.initialize_electronics()\n self.logger.info('Starting free runs and continuous reads')\n self.camera_microscope.start_free_run()\n self.camera_microscope.continuous_reads()\n self.camera_fiber.start_free_run()\n self.camera_fiber.continuous_reads()\n self.servo_off()\n\n time.sleep(1) #m Without the sleep below initialize_multiply_array does not work", "def initialize(self):\n # FIX: INITIALIZE PROCESS INPUTS??\n for mech, value in self.initial_values.items():\n mech.initialize(value)", "def __init__(self):\n self.time_limit = None\n self.time_step = None\n self.robot = None\n self.humans = None\n self.global_time = None\n self.robot_sensor_range = None\n # reward function\n self.success_reward = None\n self.collision_penalty = None\n self.discomfort_dist = None\n self.discomfort_penalty_factor = None\n # simulation configuration\n self.config = None\n self.case_capacity = None\n self.case_size = None\n self.case_counter = None\n self.randomize_attributes = None\n self.train_val_scenario = None\n self.test_scenario = None\n self.current_scenario = None\n self.square_width = None\n self.circle_radius = None\n self.human_num = None\n self.group_num = None\n self.group_size = None\n self.nonstop_human = None\n self.centralized_planning = None\n self.centralized_planner = None\n\n # for visualization\n self.states = None\n self.action_values = None\n self.attention_weights = None\n self.robot_actions = None\n self.rewards = None\n self.As = None\n self.Xs = None\n self.feats = None\n self.trajs = list()\n self.save_scene_dir = None\n self.panel_width = 10\n self.panel_height = 10\n self.panel_scale = 1\n self.test_scene_seeds = []\n self.dynamic_human_num = []\n self.human_starts = []\n self.human_goals = []\n\n #for debug\n self.add_human = []\n self.delete_human = []\n self.total_group_size = 0\n self.hp_25 = {}\n self.ha_25 = {}\n self.phase = None", "def __init__(self, simulation_attributes):\n for attr in ['locations','dprime_fnc','next_fixation',\n 'threshold', 
'num_of_searches']:\n if getattr(simulation_attributes,attr) is None:\n assert False, (\n \"Precondition violation: none attribute in simulation_attributes \"\n + attr\n )\n if not isinstance(simulation_attributes, SimulationAttributes):\n raise TypeError(\n \"The argument isn't an instance of SimulationAttributes class\"\n )\n self.senzory_map = self._locations_to_senzory_map(\n simulation_attributes.locations\n )\n self.number_of_locs = self.senzory_map.shape[0]\n self.dprime_fnc = simulation_attributes.dprime_fnc\n self.dprime_map = generate_dprime_map(self.dprime_fnc,self.senzory_map)\n self.next_fixation = simulation_attributes.next_fixation\n self.threshold = simulation_attributes.threshold\n self.num_of_searches = simulation_attributes.num_of_searches", "def setup_simulation(system, pdb, integrator):\n #platform = Platform.getPlatformByName('CPU')\n platform = Platform.getPlatformByName('OpenCL')\n prop = {'OpenCLPrecision':'single'}\n\n simulation = Simulation(pdb.topology, system, integrator, platform, prop)\n simulation.context.setPositions(pdb.positions)\n simulation.minimizeEnergy()\n simulation.context.setVelocitiesToTemperature(300*kelvin)\n print('Created simulation')\n return simulation", "def __init__(self):\n self.robot = None\n self.humans = None\n self.global_time = None\n self.human_times = None\n # Simulation configuration\n self.config = None\n self.time_limit = None\n self.time_step = None\n self.end_on_collision = True\n self.side = None\n self.pixel_side = None\n self.closed = None\n self.goal_radius = None\n self.max_humans = None\n self.min_humans = None\n self.human_num_mode = None\n self.human_num = None\n self.perpetual = None\n self.rotate_path = None\n self.randomize_attributes = None\n self.square_width = None\n self.circle_radius = None\n # Reward function\n self.success_reward = None\n self.collision_penalty = None\n self.discomfort_dist = None\n self.discomfort_scale = None\n self.discomfort_penalty_factor = None\n self.group_discomfort_penalty = None\n self.time_penalty = None\n self.progress_reward = None\n self.initial_distance = None\n self.previous_distance = None\n # Internal environment configuration\n self.case_capacity = None\n self.case_size = None\n self.case_counter = None\n self.parallel = None\n self.max_tries = None\n self.train_val_sim = None\n self.test_sim = None\n # For visualization\n self.force_list = [\n \"desired_force\",\n \"social_force\",\n \"obstacle_force\",\n \"group_coherence_force\",\n \"group_repulsive_force\",\n \"group_gaze_force\",\n ] # TODO Configure this?\n self.forces = None\n self.states = None\n self.action_values = None\n self.attention_weights = None\n # For information return\n self.obs_history = np.array([])\n self.episode_info = dict()\n self.movie_file = \"\"\n\n self.scene_manager = None\n self.use_groups = None\n self.min_group_num = None\n self.max_group_num = None\n self.centralized_planning = None\n self.centralized_planner = None\n\n self.enable_intent = None\n self.intent_type = None\n\n self.obstacles = [] # xmin,xmax,ymin,ymax\n\n self.app = None", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def __init__(self):\n super().__init__()\n self.dataFilename = None\n self.functionType = None\n self.type = 'NDimensionalDistributions'\n self.dimensionality = None\n\n self.RNGInitDisc = 5\n self.RNGtolerance = 0.2", "def setSimulation(self, simulation):\r\n raise NotImplementedError()", "def 
__init__(self, **params):\n # Dimension of the true signal x\n self.N = params.get('N', 1024)\n\n # Dimension of the measurement vector y\n self.M = params.get('M', 256)\n\n # Number of timesteps\n self.T = params.get('T', 4)\n\n # Type of the random measurement matrix to generate\n # (1) : normalized Gaussian matrix\n self.A_type = params.get('A_type', 1)\n\n # Active support probability\n self.lambda_ = params.get('lambda_', 0.08) # high sparsity default\n\n # Amplitude mean\n self.zeta = params.get('zeta', 0)\n\n # Amplitude variance\n self.sigma2 = params.get('sigma2', 1)\n\n # Amplitude innovation rate\n self.alpha = params.get('alpha', 0.10)\n\n # Active-to-inactive transition probability\n self.p01 = params.get('p01', 0.10)\n\n # Desired signal-to-noise ratio, in dB\n self.desired_SNR = params.get('desired_SNR', 25)", "def initialize(self):\n\t\tpass", "def setUpClass(cls):\n cls.sim = Simulation(logging_level=40)\n cls.sim.set_simulation_parameters(\n seed=1, task=45, output_directory=\"output\", min_speciation_rate=0.01, sigma=1, deme=40, sample_size=0.25\n )\n cls.sim.set_map(\"null\", 10, 10)\n cls.sim.add_sample_time([0, 0.01, 0.02, 0.03])\n cls.sim.run()\n cls.coal = CoalescenceTree(cls.sim)", "def model_setup(self):\n self.DNN = SganMLP(self.settings.number_of_bins)\n self.D = SganMLP(self.settings.number_of_bins)\n self.G = Generator()", "def initialise_sampler(self):\n raise NotImplementedError", "def setUp(self) -> None:\n self.x, self.fs, _ = random.BeatsGenerator(seed=1234).audio()\n self.sm = sample_sm.SinusoidalModel()\n self.sm.w_ = self.sm._normalized_window # pylint: disable=W0212\n\n warnings.filterwarnings(\"ignore\", message=\"numpy.ufunc size changed\")", "def __init__(self, model_filename, sim_filename, include_paths = None):\n\n self.model_filename = model_filename\n self.sim_filename = sim_filename\n self.include_paths = include_paths\n \n self.simulation = None\n self.fit_input = None", "def initialize_trainer(self):\n self.initialize_matrices()\n self.initialize_model()\n self.initialize_optimizers()\n return self", "def initialize_variables(self):\n self.sess.run(self.init)", "def setUpClass(cls):\n cls.sim1 = Simulation(logging_level=50)\n cls.sim1.set_simulation_parameters(\n seed=1, task=46, output_directory=\"output\", min_speciation_rate=0.9, sigma=2, deme=1, sample_size=0.01\n )\n cls.sim1.set_map_files(\"null\", \"sample/high_density.tif\")\n cls.sim1.run()\n cls.sim2 = Simulation(logging_level=50)\n cls.sim2.set_simulation_parameters(\n seed=2, task=46, output_directory=\"output\", min_speciation_rate=0.9, sigma=2, deme=1, sample_size=0.01\n )\n cls.sim2.set_map_files(\"null\", \"sample/high_density.tif\")\n optim_sol = {\n \"grid_x_size\": 10,\n \"grid_y_size\": 10,\n \"sample_x_offset\": 10,\n \"sample_y_offset\": 10,\n \"grid_file_name\": \"set\",\n }\n cls.sim2.set_optimised_solution(optim_sol)\n cls.sim2.run()\n cls.tree1 = CoalescenceTree(cls.sim1)\n cls.tree2 = CoalescenceTree(cls.sim2)", "def initialize(self):\n \n #initialize the variables\n init = tf.global_variables_initializer()\n self.session.run(init)\n \n #initialize the data iterators\n self.session.run(self.data_iterator.initializer)", "def initialize(self):\r\n pass", "def initialize(self):\r\n pass", "def initialize(self):\n self.gc1.reset_parameters()\n self.gc2.reset_parameters()\n\n for s in self.scores:\n stdv = 1. 
/ math.sqrt(s.size(1))\n s.data.uniform_(-stdv, stdv)\n for b in self.bias:\n # fill in b with postive value to make\n # score s closer to 1 at the beginning\n b.data.fill_(self.bias_init)\n\n for Dk in self.D_k:\n stdv = 1. / math.sqrt(Dk.size(1))\n Dk.data.uniform_(-stdv, stdv)\n\n for b in self.D_bias:\n b.data.fill_(0)", "def setup_simulation(system, pdb, integrator):\n #platform = Platform.getPlatformByName('CPU')\n platform = Platform.getPlatformByName('OpenCL')\n prop = {'OpenCLPrecision':'single'}\n \n simulation = Simulation(pdb.topology, system, integrator, platform, prop)\n simulation.context.setPositions(pdb.positions)\n simulation.minimizeEnergy()\n simulation.context.setVelocitiesToTemperature(310*kelvin)\n print('Created simulation')\n return simulation" ]
[ "0.8679043", "0.8295332", "0.75881743", "0.7474434", "0.72223103", "0.7014883", "0.70072925", "0.69853646", "0.6959237", "0.69506794", "0.6913647", "0.687302", "0.6830056", "0.6807677", "0.6807346", "0.6783988", "0.67358565", "0.6696126", "0.66808563", "0.6670654", "0.6643305", "0.6641261", "0.66344553", "0.6623198", "0.66022205", "0.6588499", "0.6588476", "0.65872896", "0.6584381", "0.65737075", "0.65734315", "0.65672904", "0.6558028", "0.65528286", "0.65231264", "0.65149313", "0.65125847", "0.65116465", "0.6505721", "0.6490115", "0.6462666", "0.6459781", "0.64531714", "0.644034", "0.64401567", "0.6436334", "0.64341265", "0.64323884", "0.6420037", "0.64166737", "0.6408755", "0.6407017", "0.64031774", "0.63971984", "0.6395136", "0.63783073", "0.6376558", "0.6374069", "0.6365709", "0.6363337", "0.6360867", "0.63511664", "0.63421583", "0.6333466", "0.6327744", "0.6327327", "0.6322777", "0.63222826", "0.6314802", "0.63138604", "0.63138604", "0.63138604", "0.63136965", "0.62983274", "0.6290937", "0.6286372", "0.62850803", "0.6284133", "0.62788075", "0.62788075", "0.62788075", "0.62788075", "0.62788075", "0.6276908", "0.6261363", "0.625735", "0.6252952", "0.624336", "0.6240109", "0.6227947", "0.62267625", "0.62225807", "0.6221551", "0.6220012", "0.6217737", "0.6213167", "0.62131363", "0.62131363", "0.6212623", "0.6212206" ]
0.7676313
2
Get the benchmark performance.
def get_performance(self):
    if self.skip_reference:
        return self.compare_sim.tps

    # Avoid divide by zero errors when the simulation is not executed.
    if self.reference_sim.tps == 0:
        return 0

    t0 = 1 / self.reference_sim.tps
    t1 = 1 / self.compare_sim.tps

    return 1 / (t1 - t0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def benchmark(self):\n logger.info(self.benchmark.__doc__)\n return self.run(self.benchmark_profile())", "def get_performance(self):\n return self.sim.tps", "def get_speedtest():\n\n if(DEBUG):\n print(\"Gathering speedtest results...\", flush=True)\n\n s = Speedtest()\n s.get_best_server()\n s.download()\n \n return s.results.dict()", "def retrieve( self, benchmark, extraLabel='' ):\n if benchmark.reference is ReferenceBenchmark.SP:\n idx = np.argmax( self.sps )\n else:\n # Get reference for operation:\n if benchmark.reference is ReferenceBenchmark.Pd:\n ref = self.pds\n elif benchmark.reference is ReferenceBenchmark.Pf:\n ref = self.pfs\n delta = ref - benchmark.refVal\n idx = np.argmin( np.abs( delta ) )\n return PerformancePoint( name=extraLabel + benchmark.name\n , sp=self.sps[ idx ]\n , pd=self.pds[ idx ]\n , pf=self.pfs[idx]\n , thres=self.thresholds[idx]\n )", "def benchmark_result(self):\n return self._benchmark_id", "def Run(benchmark_spec: bm_spec.BenchmarkSpec) -> List[sample.Sample]:\n discovery_duration = benchmark_spec.data_discovery_service.DiscoverData()\n return [\n sample.Sample('data_discovery_duration', discovery_duration, 'seconds',\n benchmark_spec.data_discovery_service.GetMetadata())]", "def report_performance(self):\n performance = self.amygdala.visualize(self.timestep, \n self.name, \n self.log_dir)\n print('Final performance is {0:.3}'.format(performance))\n self.backup()\n return performance", "def GetSpeed(self):\n pass", "def collect_perf_data(tracker: cm.RateTracker):\n pd = PerfData()\n\n pd.compile_time = _get_optional_counter(KEY_COMPILE_TIME) * 1e-3\n pd.programming_time = _get_optional_counter(KEY_PROGRAMMING_TIME) * 1e-9\n pd.est_samples_per_sec = _get_optional_counter(KEY_SYSTEM_PERF)\n\n pd.total_samples = tracker._partial_count + tracker._count\n pd.samples_per_sec = tracker.global_rate()\n if pd.samples_per_sec > 0:\n pd.total_time = float(pd.total_samples) / pd.samples_per_sec\n else:\n pd.total_time = 0.0\n\n return pd", "def get_speed(self):\n raise NotImplementedError", "def get_speed(self):\n raise NotImplementedError", "def get_benchmark(client):\n r = client.get(config.API_PATH() + '/benchmarks')\n benchmarks = json.loads(r.data)\n return benchmarks['benchmarks'][0]['id']", "def get_speed(self):\n raise NotImplementedError()", "def tickPerf(self):\n return self._tickPerf", "def performance_measure(self, x):\n # \"calculate performance measure\" \n pref = x.evaluate()\n return pref", "def get_performance_test_cases(test_suite):\n return get_cases(test_suite, r'test_perf_')", "def on_get(self, req: Request, resp: Response):\n benchmarks = self.storage.fetch_benchmark({})\n\n response = build_benchmarks_response(benchmarks)\n resp.text = json.dumps(response)", "def benchmark_selection(self):\n return self._benchmark_selection", "def benchmark_profile(self):\n cb_bin = os.path.join(bin_path, 'compilebench')\n desc = \"benchmark\"\n test_name = \"compilebench_{0}\".format(to_safe_name(desc))\n test = TestProfile(\n name=test_name,\n desc=desc,\n test_path=self.test_path,\n bin_path=bin_path,\n command=\"{0} -D {1} -i 10 --makej\".format(cb_bin, self.test_path))\n\n return test", "def get_check_performance(self):\n\n return self._check_performance or Test.performance_params", "def cpu_time(self):", "def get_benchmark(self, benchmark):\n\t\tif not isinstance(benchmark, str) and not callable(benchmark): return benchmark\n\t\telif benchmark in self.classes:\treturn self.classes[benchmark]()\n\t\traise TypeError('Passed benchmark is not 
defined!')", "def execute(self):\n print_verbose_messages = (self.verbose\n and self.device.communicator.rank == 0)\n\n # Ensure that all ops are attached (needed for is_tuning_complete).\n self.run(0)\n\n if print_verbose_messages:\n print(f'Running {type(self).__name__} benchmark')\n\n if print_verbose_messages:\n print(f'.. warming up for {self.warmup_steps} steps')\n self.run(self.warmup_steps)\n\n if (isinstance(self.device, hoomd.device.GPU)\n and hasattr(self.sim.operations, 'is_tuning_complete')):\n while not self.sim.operations.is_tuning_complete:\n if print_verbose_messages:\n print('.. autotuning GPU kernel parameters for '\n f'{self.warmup_steps} steps')\n self.run(self.warmup_steps)\n\n if print_verbose_messages:\n print(f'.. running for {self.benchmark_steps} steps '\n f'{self.repeat} time(s)')\n\n # benchmark\n performance = []\n\n if isinstance(self.device, hoomd.device.GPU):\n with self.device.enable_profiling():\n for i in range(self.repeat):\n self.run(self.benchmark_steps)\n performance.append(self.get_performance())\n if print_verbose_messages:\n print(f'.. {performance[-1]} {self.units}')\n else:\n for i in range(self.repeat):\n self.run(self.benchmark_steps)\n performance.append(self.get_performance())\n if print_verbose_messages:\n print(f'.. {performance[-1]} {self.units}')\n\n return performance", "def _get_time(self, state: State) -> int:\n benchmark_time = {\n 'resnet': state.timestamp.epoch.value,\n 'bert': state.timestamp.sample.value,\n }\n return benchmark_time[self.benchmark]", "def get_cpu_speed(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetCpuSpeed', self.handle)", "def get_speed(self):\n return self.get_par(\"slew_speed\")", "def get_benchmark_result(cmd):\n print 'running benchmark with command:'\n print cmd\n p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT)\n out, err = p.communicate() # We can only load 1 url on Chrome at a time\n print out\n return out, err, p.returncode", "def get_latency(self):\n raise NotImplementedError()", "def timings(self):\r\n return self._timings", "def CalculateLatency(self):\n\t\treturn self._get_attribute('calculateLatency')", "def performances(self) -> list[Performance]:\n return [\n Performance(\n training_time=Metric(p[\"training\"][\"duration\"], 0),\n latency=Metric(self.static_metrics[\"latency\"], 0),\n num_model_parameters=Metric(\n self.static_metrics[\"num_model_parameters\"], 0\n ),\n num_gradient_updates=Metric(\n p[\"training\"][\"num_gradient_updates\"], 0\n ),\n **{\n k: Metric(p[\"testing\"][k], 0)\n for k in [\"mase\", \"smape\", \"nrmse\", \"nd\", \"ncrps\"]\n },\n )\n for p in self.metrics\n ]", "def performance(self) -> pd.DataFrame:\n return self._performance", "def get_individual_performance(self):\n\n divs = self.page.find_all(\"span\", {\"class\":\"value\"})\n values = [div.text for div in divs]\n return values", "def extract_performance(self, measurements: Optional[tuple]) -> Optional[float]:\n if not self.provide_performance:\n return None\n if not measurements:\n return None\n return measurements[1]", "def _run():\n subprocess.check_call(\n [\n \"tools/bazel\",\n \"build\",\n \"-c\",\n \"opt\",\n \"test/core/memory_usage/memory_usage_test\",\n ]\n )\n ret = {}\n for name, benchmark_args in _BENCHMARKS.items():\n for scenario, extra_args in _SCENARIOS.items():\n # TODO(chenancy) Remove when minstack is implemented for channel\n if name == \"channel\" and scenario == \"minstack\":\n continue\n try:\n output = subprocess.check_output(\n [\n 
\"bazel-bin/test/core/memory_usage/memory_usage_test\",\n ]\n + benchmark_args\n + extra_args\n )\n except subprocess.CalledProcessError as e:\n print(\"Error running benchmark:\", e)\n continue\n for line in output.splitlines():\n for key, (pattern, conversion) in _INTERESTING.items():\n m = re.match(pattern, line)\n if m:\n ret[scenario + \": \" + key] = conversion(m.group(1))\n return ret", "def benchmark(self, **kwargs):\n num_iterations = kwargs.get(\"benchmark_iterations\")\n\n start_time = time.time()\n\n # store how far off we are\n deviations = []\n\n for _ in xrange(num_iterations):\n kwargs[\"roll\"] = decimal.Decimal(random.uniform(\n self.MIN_BENCHMARK_ROLL, self.MAX_BENCHMARK_ROLL))\n kwargs[\"pitch\"] = decimal.Decimal(random.uniform(\n self.MIN_BENCHMARK_PITCH, self.MAX_BENCHMARK_PITCH))\n\n _, deviation = self.find_closest_trajectory(**kwargs)\n deviations.append(deviation)\n\n # calculate results from the benchmarking\n total_time = time.time() - start_time\n average_time = total_time / num_iterations\n average_deviation = sum(deviations) / len(deviations)\n\n print \"AVERAGE TIME: %s AVERAGE DEVIATION: %s\" \\\n % (average_time, average_deviation)", "def speedups(self, ref):\n\n if self.dtype != 'timings':\n raise AttributeError('speedups is not applicable on ' + self.dtype + ' object')\n\n s = 1. / BenchmarkObj.scaled_timings(self, ref).to_dataframe()\n return BenchmarkObj(s, dtype='speedups', multivar=self.multivar, multiindex=self.multiindex)", "def get_benchmark_requirements(cls):\n pass", "def count_benchmarks():\n return len(setup_storage().fetch_benchmark({}))", "def print_performance_info(self):\n pass", "async def speed(self, value=None):\n return self.extract(await self._rpc.speed()) if value is None else (await self._rpc.speed(self.extend(value)))", "def Run(benchmark_spec):\n vms = benchmark_spec.vms\n results = []\n\n logging.info('Iperf Results:')\n\n # Send traffic in both directions\n for sending_vm, receiving_vm in vms, reversed(vms):\n # Send using external IP addresses\n if vm_util.ShouldRunOnExternalIpAddress():\n results.append(_RunIperf(sending_vm,\n receiving_vm,\n receiving_vm.ip_address,\n 'external'))\n\n # Send using internal IP addresses\n if vm_util.ShouldRunOnInternalIpAddress(sending_vm,\n receiving_vm):\n results.append(_RunIperf(sending_vm,\n receiving_vm,\n receiving_vm.internal_ip,\n 'internal'))\n\n return results", "def benchmark_it(self, with_gc):\n if self.run_sec is None:\n benchmark_result = self.src\n elif with_gc:\n gc_old = gc.isenabled()\n gc.enable()\n try:\n benchmark_result = self.inner()\n benchmark_result['name'] = self.name\n finally:\n if not gc_old:\n gc.disable()\n else:\n gc_old = gc.isenabled()\n gc.disable()\n try:\n benchmark_result = self.inner()\n benchmark_result['name'] = self.name\n finally:\n if gc_old:\n gc.enable()\n return benchmark_result", "def get_process_speed(self, pid):\n raise NotImplementedError()", "def perf_config(self):\n\n return self._perf_config", "def metrics(self) -> list[dict[str, dict[str, float | int]]]:\n return self.performance[\"performances\"]", "def get_speed(self):\n return self._speed", "def getTime():\n\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return 
float(time.perf_counter()*1000)", "def benchmark():\n print defer.Deferred.__module__\n for func, args, iter in benchmarkFuncs:\n print func.__name__, args, timeit(func, iter, *args)", "def getCpuTimes( self ):\n\n pars\t= ( _EVENT_CPU_TIME, 0, 0, 0 )\n values = self.adbGetEvent( pars )\n return values[2]", "def GetCpuTimestamp(self):\n return {'TotalTime': time.time()}", "def main(cls):\n parser = cls.make_argument_parser()\n args = parser.parse_args()\n args.device = make_hoomd_device(args)\n benchmark = cls(**vars(args))\n performance = benchmark.execute()\n\n if args.device.communicator.rank == 0:\n print(f'{numpy.mean(performance)}')", "def get_custom_speed(self):\n return self._custom_speed", "def get_benchmark_specification(benchmark = 'FSI1'):\n if benchmark == 'FSI1':\n rho_s = Constant(1e03)\n nu_s = Constant(0.4)\n mu_s = Constant(5e05)\n rho_f = Constant(1e03)\n nu_f = Constant(1e-03)\n U = 0.2\n T_end = 60.0\n result = \"results-FSI1/\"\n elif benchmark == 'FSI2':\n rho_s = Constant(1e04)\n nu_s = Constant(0.4)\n mu_s = Constant(5e05)\n rho_f = Constant(1e03)\n nu_f = Constant(1e-03)\n U = 1.0\n T_end = 15.0\n result = \"results-FSI2/\"\t\t\n elif benchmark == 'FSI3':\n rho_s = Constant(1e03)\n nu_s = Constant(0.4)\n mu_s = Constant(2e06)\n rho_f = Constant(1e03)\n nu_f = Constant(1e-03)\n U = 2.0\n T_end = 20.0\n result = \"results-FSI3/\"\t\t\n else:\n raise ValueError('\"{}\" is a wrong name for problem specification.'.format(benchmark))\n v_max = Constant(1.5*U) # mean velocity to maximum velocity \n # (we have parabolic profile)\n E_s = Constant(2*mu_s*(1+nu_s))\n lambda_s = Constant((nu_s*E_s)/((1+nu_s)*(1-2*nu_s)))\n mu_f = Constant(nu_f*rho_f)\n return v_max, lambda_s, mu_s, rho_s, mu_f, rho_f, T_end, result", "def get_cpu_profiler() -> SimpleCPUProfiler:\n return _cpu_instance", "def speed_get(self):\n\n return self.__get(Speed)", "def measure(self):\n pass", "def Run(benchmark_spec):\n _UpdateBenchmarkSpecWithFlags(benchmark_spec)\n vm = benchmark_spec.vms[0]\n if benchmark_spec.tpus:\n # For MLPerf 1.0, the benchmake code of different hardware are different.\n if (benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-32' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-128' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-256' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-512' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-1024' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-2048'):\n run_path = (\n '$HOME/training_results_{version}/Google/benchmarks/{model}/tpu-{tpus}'\n .format(\n version=VERSION.value,\n model=benchmark_spec.benchmark,\n tpus=benchmark_spec.tpu_groups['train'].GetAcceleratorType()))\n code_path = (\n '$HOME/training_results_{version}/Google/benchmarks/{model}/implementations/tpu-{tpus}-{model}'\n .format(\n version=VERSION.value,\n model=benchmark_spec.benchmark,\n tpus=benchmark_spec.tpu_groups['train'].GetAcceleratorType()))\n\n if MASK in benchmark_spec.benchmark:\n model = 'mask_rcnn'\n elif GNMT in benchmark_spec.benchmark:\n model = 'nmt'\n else:\n model = benchmark_spec.benchmark\n\n mlperf_benchmark_cmd = (\n 'cd {code_path} && '\n 'export PYTHONPATH=$(pwd):$(pwd)/{model} && '\n 'cd {model} && '\n '{run_path}/run_and_time.sh'.format(\n code_path=code_path,\n model=model,\n run_path=run_path))\n\n if SSD in benchmark_spec.benchmark:\n mlperf_benchmark_cmd = (\n 'export '\n 'MLP_GCS_RESNET_CHECKPOINT={checkpoint}'\n ' && 
{cmd}'.format(\n checkpoint=FLAGS.mlperf_gcs_resnet_checkpoint,\n cmd=mlperf_benchmark_cmd))\n else:\n raise ValueError(\n 'MLPerf configurations do not support the hardware in PKB. PKB may '\n 'need to be updated if this is a new TPU type.')\n\n else:\n run_sub_paths = {RESNET: 'resnet/implementations/mxnet',\n TRANSFORMER: 'transformer/implementations/pytorch',\n MINIGO: 'minigo/implementations/tensorflow',\n MASK: 'maskrcnn/implementations/pytorch',\n GNMT: 'gnmt/implementations/pytorch',\n SSD: 'ssd/implementations/pytorch',\n BERT: 'bert/implementations/pytorch',}\n benchmark_path = f'$HOME/training_results_{VERSION.value}/NVIDIA/benchmarks'\n run_path = posixpath.join(benchmark_path,\n run_sub_paths[benchmark_spec.benchmark])\n env = {\n 'DGXSYSTEM': DGXSYSTEM,\n 'NEXP': 1,\n 'PULL': 0,\n 'LOGDIR': f'/tmp/{benchmark_spec.benchmark}',\n }\n envs = {\n RESNET: {},\n TRANSFORMER: {'DATADIR': '/data/wmt/utf8'},\n MINIGO: {'CONT': 'mlperf-nvidia:minigo'},\n MASK: {},\n GNMT: {'DATADIR': '/data/gnmt'},\n SSD: {'DATADIR': '/data'},\n BERT: {}\n }\n env.update(envs[benchmark_spec.benchmark])\n\n run_script = posixpath.join(run_path, 'run_with_docker.sh')\n vm_util.ReplaceText(vm, 'SYSLOGGING=1', 'SYSLOGGING=0', run_script)\n vm_util.ReplaceText(vm, 'docker exec -it', 'docker exec -t', run_script)\n if benchmark_spec.benchmark == RESNET:\n vm_util.ReplaceText(vm, r'mpirun.*run_and_time\\.sh',\n r'.\\/run_and_time.sh', run_script)\n\n env = ' '.join(f'{key}={value}' for key, value in env.items())\n if nvidia_driver.CheckNvidiaGpuExists(vm):\n env = f'{tensorflow.GetEnvironmentVars(vm)} {env}'\n\n mlperf_benchmark_cmd = (\n f'chmod 755 {run_script} && '\n f'cd {run_path} && '\n f'{env} {run_script}')\n\n samples = []\n metadata = _CreateMetadataDict(benchmark_spec)\n stdout, _ = vm.RobustRemoteCommand(mlperf_benchmark_cmd)\n if NONE in FLAGS.mlperf_profiler:\n samples.extend(\n MakeSamplesFromOutput(\n metadata,\n stdout,\n use_tpu=bool(benchmark_spec.tpus),\n model=benchmark_spec.benchmark))\n return samples", "def __benchmark(self, clf):\n print('=' * 80)\n print('Training: ')\n print(clf)\n train_start = time()\n clf.fit(self.X_train, self.Y_train)\n train_time = time() - train_start\n print(\"The training time was: %0.3fs\" % train_time)\n\n test_start = time()\n pred = clf.predict(self.X_test)\n test_time = time() - test_start\n print(\"The test time was: %0.3fs\" % test_time)\n\n score = metrics.accuracy_score(self.Y_test, pred)\n print(\"accuracy: %0.3f\" % score)\n\n return score", "def getcpuspeed():\n f = os.popen(\"/opt/vc/bin/vcgencmd get_config arm_freq\")\n cpu = f.read()\n return cpu", "def run_benchmark(curl, benchmark, test_config = TestConfig()):\n\n warmup_runs = benchmark.warmup_runs\n benchmark_runs = benchmark.benchmark_runs\n message = '' #Message is name of benchmark... 
print it?\n\n if (warmup_runs <= 0):\n raise Exception(\"Invalid number of warmup runs, must be > 0 :\" + warmup_runs)\n if (benchmark_runs <= 0):\n raise Exception(\"Invalid number of benchmark runs, must be > 0 :\" + benchmark_runs)\n\n #Initialize variables to store output\n output = BenchmarkResult()\n output.name = benchmark.name\n output.group = benchmark.group\n metricnames = list(benchmark.metrics)\n metricvalues = [METRICS[name] for name in metricnames] # Metric variable for curl, to avoid hash lookup for every metric name\n results = [list() for x in xrange(0, len(metricnames))] # Initialize arrays to store results for each metric\n\n curl.setopt(pycurl.WRITEFUNCTION, lambda x: None) #Do not store actual response body at all.\n\n #Benchmark warm-up to allow for caching, JIT compiling, on client\n logging.info('Warmup: ' + message + ' started')\n for x in xrange(0, warmup_runs):\n if benchmark.method == u'POST' or benchmark.method == u'PUT':\n curl.setopt(curl.READFUNCTION, StringIO.StringIO(benchmark.body).read)\n curl.perform()\n logging.info('Warmup: ' + message + ' finished')\n\n logging.info('Benchmark: ' + message + ' starting')\n\n for x in xrange(0, benchmark_runs): # Run the actual benchmarks\n if benchmark.method == u'POST' or benchmark.method == u'PUT':\n curl.setopt(curl.READFUNCTION, StringIO.StringIO(benchmark.body).read)\n\n try: # Run the curl call, if it errors, then add to failure counts for benchmark\n curl.perform()\n except Exception:\n output.failures = output.failures + 1\n continue # Skip metrics collection\n\n # Get all metrics values for this run, and store to metric lists\n for i in xrange(0, len(metricnames)):\n results[i].append( curl.getinfo(metricvalues[i]) )\n\n logging.info('Benchmark: ' + message + ' ending')\n\n temp_results = dict()\n for i in xrange(0, len(metricnames)):\n temp_results[metricnames[i]] = results[i]\n output.results = temp_results\n\n curl.close()\n return analyze_benchmark_results(output, benchmark)", "def __evaluate_performance__(self,\n types=[\"static_winners\"],\n ):\n # | - __evaluate_performance__\n\n # #####################################################################\n _evaluate_performance__static_winners = \\\n self._evaluate_performance__static_winners\n meth_static_winners = _evaluate_performance__static_winners\n # #####################################################################\n\n if \"static_winners\" in types:\n meth_static_winners()\n\n #__|", "def get_ib_speed():\n if get_cluster_vendor() == \"sgi\":\n return sgi_cluster.get_ib_speed()\n elif get_cluster_vendor() == \"ibm\": \n return ibm_cluster.get_ib_speed()\n return None", "def _run_benchmark(self, params):\n logging.info('Running benchmark [%s]', self._get_name())\n params = benchmark_cnn.setup(params)\n bench = benchmark_cnn.BenchmarkCNN(params)\n bench.print_info()\n stats = bench.run()\n extras = {}\n extras['examples_per_sec'] = stats.get('images_per_sec')\n if 'last_average_loss' in stats:\n extras['last_average_loss'] = stats['last_average_loss']\n if 'top_1_accuracy' in stats:\n extras['top_1_accuracy'] = stats['top_1_accuracy']\n if 'top_5_accuracy' in stats:\n extras['top_5_accuracy'] = stats['top_5_accuracy']\n self.report_benchmark(\n iters=stats.get('num_steps'),\n wall_time=stats.get('average_wall_time'),\n extras=extras)", "def measure(self):\n return self._measure", "def getStats(self):\n\n raise NotImplementedError", "def benchmark(func):\n start = time.time()\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n rc = 
func(*args, **kwargs)\n print('Running time: {}'.format(time.time() - start))\n return rc\n return wrapper", "def clock_speed(self):\n return self._clock_speed", "def runtime(self):\n return self.stop_time - self.start_time", "def get_profile_stats():\n return p_stats", "def get_measured_current(self):\n status = self.get_status_response()\n current = status[16] + (status[17] * 0x100) + (status[18] * 0x10000) + (status[19] * 0x1000000)\n current = float(current)\n current /= (1000.0 * 1000.0)\n return current\n #end get_measured_current", "def speedtest():\n try:\n res = st.Speedtest()\n except st.ConfigRetrievalError:\n return \"Not connected to internet\"\n\n # Create a spinner on command line to show that its running\n print('Running the test ')\n\n res.get_best_server()\n download_speed = res.download()\n upload_speed = res.upload()\n\n # Print the results\n print('Speed test results:')\n print('Download: ' + pretty_speed(download_speed))\n print('Upload: ' + pretty_speed(upload_speed))", "def bench_report(t1, t2):\n print \"\\n\\n Time taken: {0}\".format(t2 - t1)", "def findBenchFromDevice(self, device):\n return device.bench", "def get_perf(self) :\n self.train()\n\n prediction = self.clf.predict(self.df_test.drop(columns = 'up')[:-1])\n self.accuracy = accuracy_score(df_test['up'][length:].values, prediction)\n tn, fp, fn, tp = confusion_matrix(df_test['up'][length:].values, prediction).ravel()\n self.recall = tp/(tp+fn)\n self.specificity = tn / (tn+fp)\n\n\n self.df_true = self.df_true[self.length:]\n\n profit = 1\n mini = 1\n maxi = 1\n self.df_true['close'] = self.df_true['close'].map(lambda x : np.exp(x))\n for s in range(1,len(self.df_true)):\n if prediction[x-1] == 1 :\n result = ((self.df_true['close'].iloc[s] -self.df_true['close'].iloc[s-1]) / self.df_true['close'].iloc[s-1]) + 1\n profit = profit * result\n if result < mini :\n mini = result\n if maxi < result :\n maxi = result\n self.mini = mini\n self.maxi = maxi\n self.profit = profit", "def get_perfcount_type(self):\n return self._perfcount_type", "def estimatePerformance(self, model):\n Y_pred = model.computeLOO()\n #performance = self.measure.multiOutputPerformance(self.Y, Y_pred)\n #performance = self.measure.getPerformance(self.Y, Y_pred)\n #performance = measure_utilities.aggregate(performance)\n performance = self.measure(self.Y, Y_pred)\n self.predictions.append(Y_pred)\n return performance", "def execute_benchmark(path, budget=None, threads=None):\n benchmark_path = runtime.binary_path(path)\n cmd = [benchmark_path, \"--benchmark_format=json\"]\n if budget is not None:\n cmd += [\"-b\", str(budget)]\n if threads is not None:\n cmd += [\"-t\", str(threads)]\n res = process.execute(cmd)\n return json.loads(res.std_out)", "def get_elapsed_time(self):\r\n self.get_bb_result()\r\n csv_path = self.bb_log_path + os.sep + 'run-logs' + os.sep + 'BigBenchTimes.csv'\r\n if not os.path.isfile(csv_path):\r\n print('BigBenchTimes.csv does not exist in {0}, existing...'.format(self.bb_log_path))\r\n exit(-1)\r\n df = pd.read_csv(csv_path, delimiter=';').loc[:,\r\n ['benchmarkPhase', 'streamNumber', 'queryNumber', 'durationInSeconds']]\r\n elapsed_time = pd.DataFrame()\r\n is_exist = False\r\n for phase in ['POWER_TEST', 'THROUGHPUT_TEST_1']:\r\n benchmark_phase = (df['benchmarkPhase'] == phase)\r\n if any(benchmark_phase): # whether this phase exist in the BB logs\r\n if phase == 'POWER_TEST': # power test overall and each query\r\n stream_num = ((df['streamNumber']) == 0)\r\n query_num = (pd.notnull(df['queryNumber']))\r\n 
mask = benchmark_phase & stream_num & query_num\r\n seconds = df[mask]['durationInSeconds'].values\r\n elapsed_time.insert(0, phase, seconds)\r\n elapsed_time.index = df[mask]['queryNumber'].astype('int64')\r\n elif phase == 'THROUGHPUT_TEST_1':\r\n streams = int(np.max(df['streamNumber']))\r\n for stream in range(streams + 1):\r\n stream_num = ((df['streamNumber']) == stream)\r\n query_num = (pd.notnull(df['queryNumber']))\r\n mask = benchmark_phase & stream_num & query_num\r\n seconds = df[mask]['durationInSeconds'].values\r\n elapsed_time.insert(stream + 1, 'stream{0}'.format(stream), seconds)\r\n elapsed_time.index = df[mask]['queryNumber'].astype('int64')\r\n is_exist = True\r\n if is_exist:\r\n print('*' * 100)\r\n print('Elapsed time of each query:\\n {0} \\n'.format(elapsed_time.to_string()))\r\n\r\n result_path = self.bb_log_path + os.sep + 'bb_results.log'\r\n with open(result_path, 'a') as f:\r\n f.write('*' * 100 + '\\n')\r\n f.write('Elapsed time of each query:\\n {0} \\n'.format(elapsed_time.to_string()))\r\n else:\r\n print('It seems BigBenchTimes.csv in {0} does not include TPCx-BB phases:POWER_TEST, THROUGHPUT_TEST_1' \\\r\n 'existing...'.format(self.bb_log_path))\r\n exit(-1)", "def calculate_system_performance(self):\n\n self._calculate_high_order_wfe()\n self._calculate_strehl()", "def statistics(target):\n worker=current_worker()\n if (not is_worker_thread()) or (not worker) or (not worker.isopen) or (worker.index<0):\n logging.warning(\"statistics can only be called from activated worker thread\")\n return\n with worker.statistics_lock:\n return target(worker.statistics_data)", "def get_run_stats(self):\n return self.run_stats", "def get_cpu(self):\n pass", "def get_resources():\n # Acquire the lock...\n get_resources_lock.acquire()\n\n # ...but always release it\n try:\n # Construct the dictionaries as copies from nanny\n (limits,usage) = nanny.get_resource_information()\n\n\n # Calculate all the usage's\n pid = os.getpid()\n\n # Get CPU and memory, this is thread specific\n if ostype in [\"Linux\", \"Darwin\"]:\n \n # Get CPU first, then memory\n usage[\"cpu\"] = os_api.get_process_cpu_time(pid)\n\n # This uses the cached PID data from the CPU check\n usage[\"memory\"] = os_api.get_process_rss()\n\n # Get the thread specific CPU usage\n usage[\"threadcpu\"] = os_api.get_current_thread_cpu_time() \n\n\n # Windows Specific versions\n elif ostype in [\"Windows\"]:\n \n # Get the CPU time\n usage[\"cpu\"] = windows_api.get_process_cpu_time(pid)\n\n # Get the memory, use the resident set size\n usage[\"memory\"] = windows_api.process_memory_info(pid)['WorkingSetSize'] \n\n # Get thread-level CPU \n usage[\"threadcpu\"] = windows_api.get_current_thread_cpu_time()\n\n # Unknown OS\n else:\n raise EnvironmentError(\"Unsupported Platform!\")\n\n # Use the cached disk used amount\n usage[\"diskused\"] = cached_disk_used\n\n finally:\n # Release the lock\n get_resources_lock.release()\n\n # Copy the stop times\n stoptimes = process_stopped_timeline[:]\n\n # Return the dictionaries and the stoptimes\n return (limits,usage,stoptimes)", "def measure_test(self):\n return self.execute(Sgp40I2cCmdExecuteSelfTest())", "def build_benchmark(base_url, node):\n node = lowercase_keys(flatten_dictionaries(node)) # Make it usable\n\n benchmark = Benchmark()\n\n # Read & set basic test parameters\n benchmark = build_test(base_url, node, benchmark)\n\n # Complex parsing because of list/dictionary/singleton legal cases\n for key, value in node.items():\n if key == u'warmup_runs':\n 
benchmark.warmup_runs = int(value)\n elif key == u'benchmark_runs':\n benchmark.benchmark_runs = int(value)\n elif key == u'output_format':\n format = value.lower()\n if format in OUTPUT_FORMATS:\n benchmark.output_format = format\n else:\n raise Exception('Invalid benchmark output format: ' + format)\n elif key == u'output_file':\n if not isinstance(value, basestring):\n raise Exception(\"Invalid output file format\")\n benchmark.output_file = value\n elif key == u'metrics':\n if isinstance(value, unicode) or isinstance(value,str):\n # Single value\n benchmark.add_metric(unicode(value, 'UTF-8'))\n elif isinstance(value, list) or isinstance(value, set):\n # List of single values or list of {metric:aggregate, ...}\n for metric in value:\n if isinstance(metric, dict):\n for metricname, aggregate in metric.items():\n if not isinstance(metricname, basestring):\n raise Exception(\"Invalid metric input: non-string metric name\")\n if not isinstance(aggregate, basestring):\n raise Exception(\"Invalid aggregate input: non-string aggregate name\")\n # TODO unicode-safe this\n benchmark.add_metric(unicode(metricname,'UTF-8'), unicode(aggregate,'UTF-8'))\n\n elif isinstance(metric, unicode) or isinstance(metric, str):\n benchmark.add_metric(unicode(metric,'UTF-8'))\n elif isinstance(value, dict):\n # Dictionary of metric-aggregate pairs\n for metricname, aggregate in value.items():\n if not isinstance(metricname, basestring):\n raise Exception(\"Invalid metric input: non-string metric name\")\n if not isinstance(aggregate, basestring):\n raise Exception(\"Invalid aggregate input: non-string aggregate name\")\n benchmark.add_metric(unicode(metricname,'UTF-8'), unicode(aggregate,'UTF-8'))\n else:\n raise Exception(\"Invalid benchmark metric datatype: \"+str(value))\n\n return benchmark", "def meter_stats():\n current_time = time.time()\n r = requests.get('http://localhost:8080/stats/flow/1')\n r.raise_for_status()\n data = r.json()\n bytes_tx = 0\n for stat in data['1']:\n if stat['match'].get('dl_src') == '00:00:00:00:00:01':\n bytes_tx += stat['byte_count']\n global LAST_TIME\n global LAST_BYTES_TX\n time_diff = current_time - LAST_TIME\n byte_diff = bytes_tx - LAST_BYTES_TX\n LAST_TIME = current_time\n LAST_BYTES_TX = bytes_tx\n transfer_rate = byte_diff / time_diff / 1024\n # We need to accomodate the dropping of our rule with the hard timeout\n return jsonify({'transfer_rate': transfer_rate})", "def performance_analytics(self):\n return pf.create_returns_tear_sheet(self.port_rets, return_fig=True)", "def performance_test():\n from timeit import Timer\n t = Timer(\"test()\", \"from __main__ import test\")\n print t.timeit(number=1)", "def get_overall_cpu_util(dut, exclude_proc_name=None):", "def get_fan_speed(self):\n response = self.parent.fancoolers.get_speed()\n if response is not None:\n response = response[0]\n return response", "def compare_performance(self):\n\n if self.label_type == \"categorical\":\n self._eval_classifier()\n\n elif self.label_type == \"numerical\":\n self._eval_regressor()\n\n return self.performance_comparison", "def benchmark(func):\n import time\n @wraps(func)\n def wrapper(*args, **kwargs):\n t = time.clock()\n res = func(*args, **kwargs)\n print(func.__name__, time.clock()-t)\n return res\n return wrapper", "def list_runtimes(self, workbench):\n pass" ]
[ "0.7716694", "0.6812372", "0.66633004", "0.64185244", "0.640215", "0.6386745", "0.6361457", "0.6321419", "0.6289639", "0.6282566", "0.6282566", "0.6264686", "0.6244597", "0.6237336", "0.62242454", "0.621317", "0.62114346", "0.62091786", "0.6123424", "0.61178464", "0.61099523", "0.6070691", "0.6052437", "0.59879875", "0.59754914", "0.5964315", "0.5941265", "0.5929094", "0.5905383", "0.58790445", "0.5863032", "0.58359504", "0.5826487", "0.5800811", "0.5782847", "0.5776392", "0.577316", "0.5770583", "0.57621527", "0.57563496", "0.57469594", "0.57252383", "0.5719081", "0.57015353", "0.56999636", "0.56878597", "0.56721514", "0.5663367", "0.562164", "0.562164", "0.562164", "0.562164", "0.562164", "0.562164", "0.56155086", "0.56010985", "0.55953723", "0.5594144", "0.5578473", "0.5572088", "0.5568318", "0.55583644", "0.55536455", "0.55527353", "0.55374056", "0.55252206", "0.5523765", "0.55164295", "0.55144984", "0.5508517", "0.55068815", "0.5505139", "0.54977643", "0.548421", "0.5478812", "0.5473296", "0.5471622", "0.54696476", "0.54692334", "0.54653484", "0.54612815", "0.5456004", "0.54533684", "0.54464316", "0.54274476", "0.54256564", "0.542438", "0.5424334", "0.54228365", "0.540809", "0.54046184", "0.5400411", "0.5397021", "0.539513", "0.5388115", "0.53837043", "0.5373189", "0.5369347", "0.53652954", "0.5359488" ]
0.61632097
18
Make an ArgumentParser instance for comparative benchmark options.
def make_argument_parser():
    parser = Benchmark.make_argument_parser()
    parser.add_argument('--skip-reference',
                        action='store_true',
                        help='Skip the reference simulation run.')
    return parser
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_argument_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--device',\n type=str,\n choices=['CPU', 'GPU'],\n help='Execution device.',\n required=True)\n parser.add_argument('-N',\n type=int,\n default=DEFAULT_N,\n help='Number of particles.')\n parser.add_argument('--rho',\n type=float,\n default=DEFAULT_RHO,\n help='Number density.')\n parser.add_argument('--dimensions',\n type=int,\n choices=[2, 3],\n help='Number of dimensions.',\n default=DEFAULT_DIMENSIONS)\n parser.add_argument('--warmup_steps',\n type=int,\n default=DEFAULT_WARMUP_STEPS,\n help='Number of timesteps to run before timing.')\n parser.add_argument('--benchmark_steps',\n type=int,\n default=DEFAULT_BENCHMARK_STEPS,\n help='Number of timesteps to run in the benchmark.')\n parser.add_argument('--repeat',\n type=int,\n default=DEFAULT_REPEAT,\n help='Number of times to repeat the run.')\n parser.add_argument('-v',\n '--verbose',\n action='store_true',\n help='Verbose output.')\n return parser", "def build_arg_parser():\n\n main = ArgumentParser(description='AMFinder command-line arguments.',\n allow_abbrev=False,\n formatter_class=RawTextHelpFormatter)\n\n subparsers = main.add_subparsers(dest='run_mode', required=True,\n help='action to be performed.')\n\n _ = training_subparser(subparsers)\n _ = prediction_subparser(subparsers)\n _ = diagnostic_subparser(subparsers)\n\n return main", "def make_argument_parser():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"data_directory\",\r\n help=\"Directory where the data files live.\")\r\n parser.add_argument(\"out\", help=\"Output directory of files.\")\r\n parser.add_argument(\"-t\", \"--test\", action=\"store_true\",\r\n help=(\"Test mode, avoids slow classifiers and uses\"\r\n \" 3 folds\"))\r\n parser.add_argument(\"--folds\", default=10,\r\n help=\"Number of folds for n-fold cross validation\")\r\n parser.add_argument(\"--data_pattern\", default=\"*.mat\",\r\n help=\"Pattern for data files\")\r\n parser.add_argument(\"--label_pattern\", default=\"*.mat\",\r\n help=\"Pattern for label files\")\r\n return parser", "def arg_parser(cls):\n parser = argparse.ArgumentParser(\n description='{} options'.format(cls.__name__),\n usage=('dotest.py --results-formatter-options='\n '\"--option1 value1 [--option2 value2 [...]]\"'))\n parser.add_argument(\n \"--dump-results\",\n action=\"store_true\",\n help=('dump the raw results data after printing '\n 'the summary output.'))\n return parser", "def make_parser():\n p = argparse.ArgumentParser(\n description=\"Visualize and analyze error from oblique/straight tag observations\"\n )\n\n p.add_argument(\"-n\", help=\"name of the test in the config file\")\n\n p.add_argument(\"-t\", help=\"throw out bad tags\", action=\"store_true\")\n\n p.add_argument(\"-v\", help=\"visualize data\", action=\"store_true\")\n\n p.add_argument(\"-i\", help=\"print result data\", action=\"store_true\")\n\n return p", "def cmd_line_parser():\n usage = \"usage: %prog [options]\\n\"\n opt_parser = OptionParser(usage=usage)\n opt_parser.add_option(\"--ai\", action=\"store\", dest=\"alternative_input\",\n help=\"an alternative input file (works only with load_from_pickle)\")\n opt_parser.add_option(\"--dl\", action=\"store\", dest=\"dumped_lexicon\",\n help=\"a dumped lexicon file (works only with load_from_pickle\")\n opt_parser.add_option(\"--dotest\", action=\"store_true\", dest=\"dotest\", default=False,\n help=\"use this flag if you want to apply testing\")\n opt_parser.add_option(\"-t\", action=\"store\", 
dest=\"test_parses\",\n help=\"the output file for the test parses\")\n opt_parser.add_option(\"-n\", action=\"store\", dest=\"train_parses\",\n help=\"the output file for the train parses\")\n opt_parser.add_option(\"-i\", dest=\"inp_file\", default=\"trainFiles/trainPairs\",\n help=\"the input file names (with the annotated corpus)\")\n opt_parser.add_option(\"--devel\", dest=\"development_mode\", default=False, action=\"store_true\",\n help=\"development mode\")\n\n return opt_parser", "def create_basic_parse():\n # SEE: https://docs.python.org/3/library/argparse.html\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--path_cover', type=str, required=True,\n help='path to the csv cover file')\n parser.add_argument('-d', '--path_dataset', type=str, required=False,\n help='path to the dataset location, '\n 'if missing in cover', default=None)\n parser.add_argument('-o', '--path_out', type=str, required=True,\n help='path to the output directory')\n parser.add_argument('--unique', dest='unique', action='store_true',\n help='whether each experiment have unique time stamp')\n parser.add_argument('--visual', dest='visual', action='store_true',\n help='whether visualise partial results')\n parser.add_argument('--lock_expt', dest='lock_thread', action='store_true',\n help='whether lock to run experiment in single thread')\n parser.add_argument('--run_comp_benchmark', action='store_true',\n help='run computation benchmark on the end')\n parser.add_argument('--nb_workers', type=int, required=False, default=1,\n help='number of registration running in parallel')\n return parser", "def make_cli_parser(self):\n super(ContextualArgParser, self).make_cli_parser()\n self.cli_parser.add_option('--num-permutations', type='int',\n default=cbpn.NUM_PERMUTATIONS,\n help=(\"number of permutations for statistics \"\n \"[default: %default]\")\n )\n self.cli_parser.add_option('-s', '--edge-swaps', type='int',\n help=(\"Perform the given number of edge swaps to \"\n \"produce random graphs. [NOTE: using this option \"\n \"changes the algorithm for determining \"\n \"significance of a link between each given pair \"\n \"of terms.]\"\n )\n )\n self.cli_parser.add_option('--no-estimation', dest='estimate',\n action='store_false', default=True,\n help=(\"Do not use p-value estimation, but run the \"\n \"full number of permutations for every pair of \"\n \"annotation terms. 
[NOTE: this can substantially \"\n \"increase running time.]\"\n )\n )\n self.cli_parser.add_option('--score-correction',\n action='store_true', default=False,\n help=(\"Correct scores for each pair of terms by an \"\n \"\\\"expected\\\" value calculated from the mean \"\n \"expression value.\"\n )\n )", "def build_argument_parser():\n description=\"A simple tool to batch rename given files.\"\n parser = ArgumentParser(description=description)\n parser.add_argument(\"-i\", \"--input-list\", required=False,\n help=\"the path to the input list file.\")\n parser.add_argument(\"-p\", \"--glob-pattern\", default=DEFAULT_GLOB_PATTERN,\n help=\"a glob pattern to filter input files.\")\n return parser", "def MakeOpts():\n parser = ArgumentParser()\n\n parser.add_argument(\"-o\", \"--host\", dest=\"host\", default=\"hldbv02\",\n help=\"The hostname for the MySQL database\")\n parser.add_argument('-d', '--debug', action='store_true', default=False,\n help='debug mode, store results in dummy DB')\n \n xml_group = parser.add_mutually_exclusive_group(required=True)\n xml_group.add_argument(\"-x\", \"--xml_filename\", default=None,\n help=\"The filename for a single XML result file\")\n xml_group.add_argument(\"-a\", \"--xml_dir\", default=None,\n help=\"The directory from which to import the latest XML results file\")\n \n parser.add_argument(\"-p\", \"--plate\", default=None, type=int, required=True,\n help=\"The plate number (usually between 1-10) in the robot script\")\n parser.add_argument('exp_id_csv', nargs=1,\n help='the name of the CVS file where the exp_ids are')\n\n return parser", "def get_parser():\n\n parser = argparse.ArgumentParser(description=textwrap.dedent(\"\"\"\n Downloads and tests the md5 and file size of a given version of Anaconda located in\n http://repo.continuum.io/archive/\n\n The version option (-v) allows you to select a specific version of Anaconda to download and test.\n This will include every system's Anaconda distribution for that version (OSX, Windows, Linux)\n\n The --log option will write the results of these tests to a log file. If not enabled, results\n will be written to stdout.\n\n If you already have Anaconda installers inside the pkgs directory and wish to test those without\n downloading new ones, use the --no-download option. 
NOTE: You will still need to provide the\n version (-v) of the installers.\n \"\"\"), formatter_class=argparse.RawTextHelpFormatter)\n\n parser.add_argument('--log', action='store_true', dest='log', default=False,\n help=\"save a log of any errors discovered\")\n parser.add_argument('-v', '--version', action='store', default=False,\n help=\"version of Anaconda to download and test\")\n parser.add_argument('--no-download', action='store_true', dest='nodl', default=False,\n help=\"test local anaconda packages in pkgs, rather than download new ones\")\n\n return parser", "def set_options():\n parser = argparse.ArgumentParser(description='test hexrd.quadrature')\n\n return parser", "def _create_parser(self):\n default_options = self._create_defaults()\n\n all_categories = ['build', 'whitespace']\n\n mock_stderr = self._MockStdErr()\n\n return ArgumentParser(\n all_categories=all_categories,\n base_filter_rules=[],\n default_options=default_options,\n mock_stderr=mock_stderr,\n usage='test usage')", "def command_line_argument_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(\n description=description(),\n epilog=epilog(),\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n return parser", "def command_line_argument_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(\n description=description(),\n epilog=epilog(),\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n return parser", "def create_cli_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('json_file', action='store',\n type=str, help=('Outlier per threshold file. This file '\n 'should have been generated by the '\n 'calculate_outliers_by_threshold '\n 'script.'))\n return parser", "def mkOptionParser():\n \n usage = \"\"\"%prog <input.bed> <output.bed> <threshold>\n %prog filters out the lines that don't meet a certain threshold. 
\"\"\"\n\n parser = OptionParser(usage)\n \n\n return parser", "def create_argument_parser(cls):\n\n parser = super().create_argument_parser()\n\n # GitHub options\n group = parser.add_argument_group('GitHub arguments')\n\n group.add_argument(\"--owner\", required=True,\n help=\"GitHub owner\")\n group.add_argument(\"--repository\", required=True,\n help=\"GitHub repository\")\n group.add_argument(\"--sleep-for-rate\", dest='sleep_for_rate',\n action='store_true',\n help=\"sleep for getting more rate\")\n group.add_argument(\"--min-rate-to-sleep\", dest='min_rate_to_sleep',\n default=MIN_RATE_LIMIT, type=int,\n help=\"sleep until reset when the rate limit reaches this value\")\n\n return parser", "def argument_parser():\n parser = argparse.ArgumentParser(\n description='description',\n formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument('-n','--numcolors', type=int, help=\"Number of colors\", required=True)\n return parser", "def create_options():\n optparser = optparse.OptionParser()\n optparser.add_option(\"-f\", \"--filename\", type=\"string\",\n help=\"execute a single unit test file\")\n optparser.add_option(\"-s\", \"--subprocess\", action=\"store_true\",\n default=False,\n help=\"run everything in an own subprocess \"\n \"(default: use a single process)\")\n optparser.add_option(\"-t\", \"--timeout\", type=\"int\", default=70,\n help=\"Timout for subprocesses before being killed \"\n \"(default: 70s per file)\")\n optparser.add_option(\"-v\", \"--verbose\", action=\"store_true\", default=False,\n help=\"be verbose and print anything instantly\")\n optparser.add_option(\"-r\", \"--random\", action=\"store_true\", default=False,\n help=\"randomize the order of tests\")\n optparser.add_option(\"-S\", \"--seed\", type=\"int\",\n help=\"seed the randomizer(useful to \"\n \"recreate earlier randomized test cases)\")\n optparser.add_option(\"-i\", \"--interactive\", action=\"callback\",\n callback=include_tag,\n callback_args=(\"interactive\",),\n help=\"also execute interactive tests\")\n optparser.add_option(\"-e\", \"--exclude\", action=\"callback\",\n callback=exclude_tag, type=\"string\",\n help=\"exclude test containing the tag\")\n optparser.add_option(\"-l\", \"--listtags\", action=\"callback\",\n callback=list_tags,\n help=\"lists all available tags and exits\")\n optparser.add_option(\"--logfile\", type=\"string\",\n help=\"save output to log file\")\n optkeys = [\"filename\",\n \"subprocess\",\n \"timeout\",\n \"random\",\n \"seed\",\n \"verbose\"\n ]\n return optparser, optkeys", "def arg_parse():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-m\", \"--mix\", required=False, help=\"cube shuffle\")\n parser.add_argument(\"-e\", \"--explain\", action=\"store_true\", help=\"Get more explanation about steps\")\n options = parser.parse_args()\n return options", "def _build_arg_parser():\n parser = argparse.ArgumentParser(\n description=_description,\n add_help=True,\n )\n add_generic_args(parser)\n add_diff_args(parser)\n add_filename_args(parser, [\"base\", \"remote\"])\n\n parser.add_argument(\n '-o', '--output',\n default=None,\n help=\"if supplied, the diff is written to this file. 
\"\n \"Otherwise it is printed to the terminal.\")\n\n return parser", "def create_option_parser():\n from optparse import OptionParser\n usage='Usage: %prog [<options>] <bilingual file> <language tag 1> <language tag 2>'\n parser = OptionParser(usage=usage)\n\n parser.add_option(\n '-u', '--create-tuning',\n dest='tuning',\n help='Specify percentage of corpus to be used for tuning corpus.',\n default=0\n )\n parser.add_option(\n '-e', '--create-evaluation',\n dest='eval',\n help='Specify percentage of corpus to be used for tuning corpus.',\n default=0\n )\n return parser", "def getArgumentParser():\n parser = argparse.ArgumentParser(description=\"Script for running optimization for the ZH dark photon SR\")\n parser.add_argument('-i',\n '--infile',\n dest='infile',\n help='Input CSV file',\n default = '/afs/cern.ch/work/s/ssevova/public/dark-photon-atlas/zhdarkphotonml/samples/v09/mc16d_v09_samples.csv')\n parser.add_argument('-o',\n '--output',\n dest='outdir',\n help='Output directory for plots, selection lists, etc',\n default='outdir')\n parser.add_argument('--plotInputs',action='store_true', help='Plot scaled train & test inputs')\n parser.add_argument('--plotOutputs',action='store_true', help='Plot scaled test outputs for given probability range')\n parser.add_argument('--lower',help='Lower limit for conditional filtering')\n parser.add_argument('--upper',help='Upper limit for conditional filtering')\n\n return parser", "def get_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', type=str)\n parser.add_argument('--method', type=str)\n parser.add_argument('--size_part', type=float, default=None)\n parser.add_argument('--start', type=int, default=0)\n parser.add_argument('--count', type=int, default=None)\n return parser", "def parse_arguments():\n parser = ArgumentParser(description=\"Run tests in parallel.\")\n parser.add_argument(\n \"-d\", \"--debug\", action=\"store_true\", help=\"Enable debug logging\"\n )\n parser.add_argument(\n \"-l\", \"--layer\", help=\"Greedy match test layer name.\", action=\"append\"\n )\n parser.add_argument(\n \"-m\", \"--module\", help=\"Greedy match module name.\", action=\"append\"\n )\n return parser.parse_args()", "def build_parser(usage, **kwargs):\n return BetterArgumentParser(usage=usage, version=VERSION, **kwargs)", "def create_arg_parser():\n server_modes = ['builtin', 'waitress']\n\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('h', metavar='HOST', help='Server HOST (e.g. \"localhost\")', type=str)\n parser.add_argument('p', metavar='PORT', help='Server PORT (e.g. 
\"5001\")', type=int)\n parser.add_argument('m', metavar='SERVER_MODE', help=\", \".join(server_modes), choices=server_modes, type=str)\n parser.add_argument('--debug', help=\"Run builtin server in debug mode\", action='store_true', default=False)\n\n return parser", "def _create_argument_parser():\n\n parser = argparse.ArgumentParser(\n description=\"Execute a CPAchecker run in the VerifierCloud using the web interface.\"\n + \" Command-line parameters can additionally be read from a file if file name prefixed with '@' is given as argument.\",\n fromfile_prefix_chars=\"@\",\n add_help=False, # conflicts with -heap\n )\n\n parser.add_argument(\"-h\", \"--help\", action=\"help\", help=\"Prints this help.\")\n\n parser.add_argument(\n \"--cloudMaster\",\n dest=\"cloud_master\",\n default=\"https://vcloud.sosy-lab.org/cpachecker/webclient/\",\n metavar=\"HOST\",\n help=\"Sets the webclient host of the VerifierCloud instance to be used.\",\n )\n\n parser.add_argument(\n \"--cloudPriority\",\n dest=\"cloud_priority\",\n metavar=\"PRIORITY\",\n help=\"Sets the priority for this benchmark used in the VerifierCloud. Possible values are IDLE, LOW, HIGH, URGENT.\",\n )\n\n parser.add_argument(\n \"--cloudCPUModel\",\n dest=\"cpu_model\",\n type=str,\n default=None,\n metavar=\"CPU_MODEL\",\n help=\"Only execute runs in the VerifierCloud on CPU models that contain the given string.\",\n )\n\n parser.add_argument(\n \"--cloudUser\",\n dest=\"cloud_user\",\n metavar=\"USER:PWD\",\n help=\"The user and password for the VerifierCloud.\",\n )\n\n parser.add_argument(\n \"--revision\",\n dest=\"revision\",\n metavar=\"BRANCH:REVISION\",\n help=\"The svn revision of CPAchecker to use.\",\n )\n\n parser.add_argument(\n \"-d\", \"--debug\", action=\"store_true\", help=\"Enable debug output\"\n )\n\n parser.add_argument(\n \"-o\",\n \"--outputpath\",\n dest=\"output_path\",\n type=str,\n default=DEFAULT_OUTPUT_PATH,\n help=\"Output prefix for the generated results. 
\"\n + \"If the path is a folder files are put into it,\"\n + \"otherwise it is used as a prefix for the resulting files.\",\n )\n parser.add_argument(\n \"--resultFilePattern\",\n dest=\"result_file_pattern\",\n type=str,\n default=\"**\",\n help=\"Only files matching this glob pattern are transported back to the client.\",\n )\n\n parser.add_argument(\n \"-T\",\n \"--timelimit\",\n dest=\"timelimit\",\n default=None,\n type=util.parse_timespan_value,\n help=\"Time limit in seconds\",\n metavar=\"SECONDS\",\n )\n\n parser.add_argument(\n \"-M\",\n \"--memorylimit\",\n dest=\"memorylimit\",\n default=None,\n type=util.parse_memory_value,\n help=\"Memory limit\",\n metavar=\"BYTES\",\n )\n\n parser.add_argument(\n \"-c\",\n \"--corelimit\",\n dest=\"corelimit\",\n type=int,\n default=None,\n metavar=\"N\",\n help=\"Limit the tool to N CPU cores.\",\n )\n\n parser.add_argument(\n \"--version\", action=\"version\", version=\"%(prog)s \" + __version__\n )\n return parser", "def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('url', help='url to scrape')\n return parser", "def argParser():\n parser = ArgumentParser(description=('Downloads problems from Project Euler'\n ' and saves copies locally.'))\n parser.add_argument('-s', '--start', type=int, default=1,\n help='The problem number to start the downloads at, default 1.')\n parser.add_argument('-e', '--end', type=int, default=None,\n help='The problem number to end the downloads at, default None.')\n return parser", "def parser(cls, *, with_showtb=False):\n parser = argparse.ArgumentParser(add_help=False)\n parser.add_argument('-v', '--verbose', action='count', default=0,\n help='produce more output')\n parser.add_argument('-q', '--quiet', action='count', default=0,\n help='produce less output')\n parser.add_argument('--dry-run', dest='dryrun', action='store_true',\n default=False, help='do not actually make changes')\n\n if with_showtb:\n parser.add_argument('--traceback', action='store_true',\n default=False, help='do not hide tracebacks')\n\n return parser", "def _make_argument_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(prog=\"pyrasaeco-render\", description=__doc__)\n subparsers = parser.add_subparsers(help=\"Commands\", dest=\"command\")\n subparsers.required = True\n\n once = subparsers.add_parser(\n \"once\", help=\"Render once the scenarios and the scenario ontology\"\n )\n\n continuously = subparsers.add_parser(\n \"continuously\",\n help=\"Re-render continuously the scenarios and the scenario ontology\",\n )\n\n continuously.add_argument(\n \"-p\",\n \"--port\",\n help=\"Port on which the demo server should listen to.\\n\\n\"\n \"If not specified, the demo server will not be started.\",\n type=int,\n )\n\n for command in [once, continuously]:\n command.add_argument(\n \"-s\",\n \"--scenarios_dir\",\n help=\"Directory where scenarios reside\\n\\n\"\n \"The rendering artefacts will be produced in-place in this directory.\",\n required=True,\n )\n\n return parser", "def __create_parser_arguments(parser: argparse.ArgumentParser):\n parser.add_argument('-p', '--number-processes', type=int, default=4,\n help='specify the number of processes used')\n parser.add_argument('-s', '--measurement-strategy', type=str,\n default=MeasurementStrategy.classic.value,\n choices=(MeasurementStrategy.classic.aliases() +\n MeasurementStrategy.anticipated.aliases() +\n MeasurementStrategy.aggressive.aliases() +\n MeasurementStrategy.forced.aliases()),\n help='''\nThe measurement strategy:\n\n- 
anticipated and aggressive: are basically the same and perform measurements even though a measurement resulting in a not reachable target exists\n- classic: does not perform a measurement in the above case\n- forced: always perform a measurement\n ''')\n parser.add_argument('-n', '--domain-block-limit', type=int, default=1000,\n help='The number of domains taken per block to process them')\n parser.add_argument('-q', '--ripe-request-limit', type=int,\n help='How many request should normally be allowed per second '\n 'to the ripe server', default=25)\n parser.add_argument('-b', '--ripe-request-burst-limit', type=int,\n help='How many request should at maximum be allowed per second'\n ' to the ripe server', default=40)\n parser.add_argument('-ml', '--measurement-limit', type=int,\n help='The amount of parallel RIPE Atlas measurements allowed',\n default=100)\n parser.add_argument('-ak', '--api-key', type=str,\n help='The RIPE Atlas Api key',\n default='1dc0b3c2-5e97-4a87-8864-0e5a19374e60')\n parser.add_argument('--bill-to', type=str,\n help='The RIPE Atlas Bill to address')\n parser.add_argument('-o', '--without-new-measurements', action='store_true',\n help='Evaluate the matches using only data/measurements already available '\n 'locally and remote')\n parser.add_argument('-ma', '--allowed-measurement-age', type=int, default=30*24*60*60,\n help='The allowed measurement age in seconds (Default 30 days)')\n parser.add_argument('-bt', '--buffer-time', type=float, default=constants.DEFAULT_BUFFER_TIME,\n help='The assumed amount of time spent in router buffers')\n parser.add_argument('-mp', '--measurement-packets', type=int, default=1,\n help='Amount of packets per measurement')\n parser.add_argument('-e', '--use-efficient-probes', action='store_true',\n help='sort probes after second hop latency and use the most efficient ones')\n parser.add_argument('-mt', '--probes-per-measurement', default=1, type=int,\n help='Maximum amount of probes used per measurement')\n parser.add_argument('-dpf', '--disable-probe-fetching', action='store_true',\n help='Debug argument to prevent getting ripe probes')\n parser.add_argument('--include-ip-encoded', action='store_true',\n help='Search also domains of type IP encoded')\n parser.add_argument('--stop-without-old-results', action='store_true',\n help='Do not measure for domains if there is no existing measurement')\n parser.add_argument('--endless-measurements', action='store_true',\n help='Should the list of IPs be reapeatedly scanned until the process is '\n 'closed')\n parser.add_argument('--random-domains', action='store_true',\n help='Select the domains to measure randomly')\n parser.add_argument('--debug', action='store_true', help='Use only one process and one thread')\n parser.add_argument('-l', '--log-file', type=str, default='check_locations.log',\n help='Specify a logging file where the log should be saved')\n parser.add_argument('-ll', '--log-level', type=str, default='INFO',\n choices=['NOTSET', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],\n help='Set the preferred log level')\n parser.add_argument('-dbn', '--database-name', type=str, default='hloc-measurements')\n parser.add_argument('--ip-filter-file', type=str,\n help='The file with the IPs which should be validated. 
'\n 'Only IPs which also have a domain entry in the database are '\n 'considered')", "def argument_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--exp', type=str, default=\"Iris\",\n choices=[\"Iris\", \"BreastCancer\", \"Cifar10\"])\n parser.add_argument('--setting', type=int, default=1,\n choices=[1, 2, 3])\n return parser.parse_args()", "def get_args():\n parser = argparse.ArgumentParser(description=\"Arguments for data exploration\")\n parser.add_argument(\"--tokenize\",\n dest=\"tokenize\",\n action=\"store_true\",\n help=\"Tokenize by words and sentences, counting averages/sd for each.\")\n return parser", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('n_iter',\n help='number of iteration',\n type=int)\n parser.add_argument('n_processes',\n help='number of processes',\n type=int)\n parser.add_argument('method',\n help='mutual exclusion method')\n parser.add_argument('duration',\n help='Duration of each process',\n type=float)\n return parser.parse_args()", "def _createOptionParser():\n usage = \\\n\"\"\"%prog [options] outputFile\n\nMines a large number of concise wine reviews from an online web site, and dumps\nthem to the given filename.\"\"\"\n\n parser = optparse.OptionParser(usage)\n\n parser.add_option('--debug', action='store_true', dest='debug',\n default=False, help='Enables debugging mode [False]')\n\n return parser", "def initCmdLineParser():\n\n # Init parser and all general flags\n logging.debug(\"initiating command line option parser\")\n usage = \"usage: %prog [options]\"\n parser = OptionParser(usage)\n parser.add_option(\"--gen-answer-file\", help=\"Generate a template of an answer file, using this option excludes all other option\")\n parser.add_option(\"--answer-file\", help=\"Runs the configuration in none-interactive mode, extracting all information from the \\\n configuration file. 
using this option excludes all other option\")\n parser.add_option(\"--no-mem-check\", help=\"Disable minimum memory check\", action=\"store_true\", default=False)\n\n # For each group, create a group option\n for group in controller.getAllGroups():\n groupParser = OptionGroup(parser, group.getKey(\"DESCRIPTION\"))\n\n for param in group.getAllParams():\n cmdOption = param.getKey(\"CMD_OPTION\")\n paramUsage = param.getKey(\"USAGE\")\n optionsList = param.getKey(\"OPTION_LIST\")\n useDefault = param.getKey(\"USE_DEFAULT\")\n if not useDefault:\n if optionsList:\n groupParser.add_option(\"--%s\" % cmdOption, metavar=optionsList, help=paramUsage, choices=optionsList)\n else:\n groupParser.add_option(\"--%s\" % cmdOption, help=paramUsage)\n\n # Add group parser to main parser\n parser.add_option_group(groupParser)\n\n return parser", "def setup_options_parser(self, argparser):\n pass", "def setParser():\n parser = argparse.ArgumentParser(\n prog=\"Nussinov Algorithm Solver\",\n description=\"A program that runs Nussinov's Algorithm on a given RNA strand and returns the most viable pairings.\"\n )\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument(\"-f\", \"--filepath\", help=\"the path to a text file with a sequence\")\n group.add_argument(\"-s\", \"--sequence\", help=\"the RNA sequence to evaluate\")\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"More verbose output\")\n parser.add_argument(\"-u\", \"--uncommon\", action=\"store_true\", help=\"Use Uncommon RNA matches (G,U)\")\n return parser", "def parse_arguments():\n\n parser = argparse.ArgumentParser()\n\n # Specify if one wants to perform the persistence forecast\n parser.add_argument(\n '-p',\n '--persistence-forecast',\n action='store_true',\n help='if set, perform persistence forecast'\n )\n\n # Specify if one wants to look for the best epoch value\n parser.add_argument(\n '-e',\n '--epoch',\n action='store_true',\n help='if set, look for the best epoch value'\n )\n\n return parser", "def create_arguments_parser():\n description = \"Statically analyse SBML files for modelling errors\"\n parent_arg_parser = rate_checker_sbml.create_arguments_parser()\n parser = argparse.ArgumentParser(description=description,\n parents=[parent_arg_parser])\n return parser", "def make_parser():\n\n parser = ArgumentParser(description=\"Create dummy sensor stream esque data\")\n parser.add_argument('--tuples-per-emit', '-t', type=int, default=1,\n help='number of tuples to emit at once')\n parser.add_argument('--sensors', '-s', type=int, default=1,\n help='number of sensors to generate')\n\n return parser", "def setup_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-u\", \"--url\", dest='url', required=True,\n help=\"Falkonry Edge URL\")\n parser.add_argument(\"-i\", \"--input_file\", dest='input', required=True,\n help=\"Input data file to feed into Falkonry Edge Analyzer\")\n parser.add_argument(\"-o\", \"--output_file\", dest='output', required=True,\n help=\"File name to write Falkonry Edge Analyzer output\")\n parser.add_argument(\"-t\", \"--time_column\", dest='time', type=int, required=True,\n help=\"Time column index starting with 0\")\n parser.add_argument(\"-z\", \"--time_zone\", dest='zone', required=True,\n help=\"Time zone\")\n parser.add_argument(\"-f\", \"--time_format\", dest='format', required=True,\n help=\"Timestamp format\")\n parser.add_argument(\"-e\", \"--entity_column\", dest='entity', type=int,\n help=\"Entity column index starting with 0\")\n 
parser.add_argument(\"-b\", \"--batch_column\", dest='batch', type=int,\n help=\"Batch column index starting with 0\")\n parser.add_argument(\"-r\", \"--input_feed_rate\", dest='rate', type=int, default=1000,\n help=\"Number of records to send to edge per second.\")\n\n return parser", "def define_command_line_options():\n \n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--T', '--stop_time', type=float, \n default=20.0, help='end time of simulation', \n metavar='t')\n parser.add_argument('--dt', type=float, default=0.1,\n help='timestep for the discrete apporoximation',\n metavar='dt')\n parser.add_argument('--v0', '--initial_condition', type=float,\n default=-0.0, help='initial condition v(0)',\n metavar='v0')\n parser.add_argument('--makeplot', action='store_true',\n help='display plot or not')\n parser.add_argument('--rho', type=float, default=1.0,\n help='air mass density', metavar='rho')\n parser.add_argument('--Cd', type=float, default=1.2,\n help='drag coefficient', metavar='Cd')\n parser.add_argument('--m', '--body_mass', type=float, default=100.,\n help='body mass', metavar='m')\n parser.add_argument('--A', type=float, default=0.5,\n help='body cross sectional area',\n metavar='A')\n parser.add_argument('--tp', type=float, default=-1,\n help='time of parachute deployment', metavar='tp')\n return parser", "def parse_commandline():\n parser = optparse.OptionParser()\n\n parser.add_option(\"-m\", \"--motornum\", default=1, type=int)\n parser.add_option(\"-n\", \"--steps\", default=1000, type=int)\n parser.add_option(\"-a\", \"--angle\", default=2.0, type=float)\n parser.add_option(\"-c\", \"--doCompile\", action=\"store_true\", default=False)\n parser.add_option(\"--doSteps\", action=\"store_true\", default=False)\n parser.add_option(\"--doAngle\", action=\"store_true\", default=False)\n\n opts, args = parser.parse_args()\n\n return opts", "def create_parser():\n parser = OptionParser()\n\n parser.add_option(\"-s\", \"--script\", dest=\"script\", default='pbs.sh', help=\"Output location\")\n parser.add_option(\"-p\", \"--period\", dest=\"period\", default=\"30\", help=\"qstat period\")\n\n parser.set_usage(\"\"\"%prog [options]\"\"\")\n return parser", "def get_test_parser():\n parser = argparse.ArgumentParser(description='Acceptability Test')\n\n parser.add_argument(\"-mf\", \"--model_file\", type=str, help=\"Model file to load\")\n parser.add_argument(\"-vf\", \"--vocab_file\", type=str, help=\"Vocab file to load\")\n parser.add_argument(\"-ef\", \"--embedding_file\", type=str, help=\"Embedding file to load\")\n parser.add_argument(\"-o\", \"--output_file\", type=str, help=\"Output file for model classifications.\")\n parser.add_argument(\"-d\", \"--dataset_path\", type=str, help=\"Test file\")\n parser.add_argument(\"-s\", \"--seed\", type=int, default=11111, help=\"Random seed\")\n parser.add_argument(\"-g\", \"--gpu\", action=\"store_true\", default=False, help=\"Use GPU\")\n parser.add_argument(\"--glove\", action=\"store_true\", default=False,\n help=\"Whether to use GloVE embeddings for models\")\n parser.add_argument(\"-e\", \"--embedding\", type=str, default=\"glove.840B.300d\",\n help=\"Embedding type to be used, select from\" +\n \"http://torchtext.readthedocs.io/en/latest/vocab.html#pretrained-aliases\")\n\n # Preprocess arguments\n parser.add_argument(\"--should_not_preprocess_data\", action=\"store_true\", default=False,\n help=\"Whether to preprocess data? 
Default: true (Will preprocess)\")\n parser.add_argument(\"--should_not_lowercase\", action=\"store_true\", default=False,\n help=\"Should lowercase data? Default: true (Will lowercase)\")\n parser.add_argument(\"--preprocess_tokenizer\", default='space', type=str,\n help=\"Type of tokenizer to use (space|nltk)\")\n parser.add_argument(\"-cp\", \"--crop_pad_length\", type=int, default=30,\n help=\"Padding Crop length\")\n return parser", "def parse_args(ns=None):\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=textwrap.dedent(\"\"\"\\\n parse parameters per iteration for a given benchmark and plots them\n \n Benchmark:\n This script is designed for VPR, so circuit and channel_width of a benchmark are required.\n The circuit name can be a summary or short name rather than the full name.\n\n Parse file:\n \tThe first line should describe how to match iteration, such as:\n \t\t'(\\d+) (.*) sec (.*) ns'\n \tand the first group will be the matched value of iteration.\n\n \tEach following line in this file should describe 1 parameter to parse as:\n \t<parameter_name>;<options>\n \tfor example,\n \t\t'time (s);log'\n \ton the second line, then with the previous iteration regex, it would match the value before 'sec'\n \tThe parameter order should match the group order that they are supposed to match\n\n Parameter Options:\n \tlog - make the y-axis log scale for this parameter\n \t\"\"\"),\n usage=\"%(prog)s <output_file> <circuit> <channel_width> [OPTIONS]\")\n\n # arguments should either end with _dir or _file for use by other functions\n parser.add_argument(\"output_file\", \n default=\"vpr.out\",\n help=\"output file to parse; default: %(default)s\")\n parser.add_argument(\"-p\", \"--parse_file\",\n default=\"nocongestion_parse.txt\",\n help=\"config file where each line describes 1 parameter to parse as:\\\n \t<parameter_name>;<regex_to_match>\\\n default: %(default)s\")\n parser.add_argument(\"circuit\",\n \t\thelp=\"titan circuit to be parsed, similar to task\")\n parser.add_argument(\"channel_width\",\n \t\ttype=int,\n \t\thelp=\"channel used to route circuit\")\n parser.add_argument(\"--architecture\",\n \t\tdefault=\"stratixiv_arch_timing\",\n \t\thelp=\"architecture used to map circuit to; default: %(default)s\")\n parser.add_argument(\"--resfile_dir\",\n \t\tdefault=\"titan\",\n \t\thelp=\"directory (relative or absolute) of where result files (.pack, .place, .route) are kept\")\n parser.add_argument(\"-r\",\"--param_regex\", \n \t\tnargs='+',\n default=[],\n help=\"additional regular expressions to match on the result file\")\n params = parser.parse_args(namespace=ns)\n params.output_file = os.path.abspath(params.output_file)\n\n resfile_name = os.path.join(params.resfile_dir, '_'.join((params.circuit, params.architecture)))\n print(\"result file base name: \", resfile_name);\n\n setattr(params, 'pack_file', os.path.abspath(resfile_name + '.net'))\n setattr(params, 'place_file', os.path.abspath(resfile_name + '.place'))\n return params;", "def make_parser():\n\n parser = argparse.ArgumentParser(add_help=True)\n\n parser_grp_main = parser.add_argument_group('Arguments')\n\n parser_grp_main.add_argument\n\n parser_grp_main.add_argument(\n \"-i\",\n \"--inp-dir\",\n default = \"out/ln/alias/sst/all_samples\",\n help=\"The folder containing files to tidy.\"\n )\n\n parser_grp_main.add_argument(\n \"-x\",\n \"--xlsx\",\n type=str,\n help=\"The xlsx file containing the metadata to use to find samples and tidy them.\",\n 
default=\"Sequencing_summary.xlsx\",\n required=False)\n\n parser_grp_main.add_argument(\n \"-b\",\n \"--by-column\",\n nargs='+',\n type=str,\n help=\"The column names from the xlsx file to use to tidy.\",\n default=\"sample_name\",\n required=False)\n \n parser_grp_main.add_argument(\n \"-d\",\n \"--delete\",\n help=\"Delete file only this arg is used. Unsafe. Always run first without this argument and check all files listed to deletion.\",\n default=False,\n type=bool,\n )\n\n return parser", "def get_parser(self):\n parser = argparse.ArgumentParser(description='Short sample app')\n\n parser.add_argument('-a', action=\"store_true\", default=False)\n parser.add_argument('-b', action=\"store\", dest=\"b\")\n parser.add_argument('-c', action=\"store\", dest=\"c\", type=int)\n return parser", "def parse_args():\n parser = ArgumentParser()\n parser.add_argument('--agent1', required=True)\n parser.add_argument('--agent2', required=True)\n parser.add_argument('--num_games', type=int, default=100)\n parser.add_argument('--cards_in_hand', type=int, default=13)\n parser.add_argument('--verbose_mode', type=int, default=1)\n parser.add_argument('--seed', type=int, default=-1)\n\n return parser.parse_args()", "def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('webpage', help='webpage to search')\n\n return parser", "def parse_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--length', required=True, type=int, help='Test length in seconds')\n parser.add_argument('--txs-per-ledger', required=True, type=int, help='Transaction rate to submit (spam) in parallel for every ledger round')\n parser.add_argument('--prioritizer-seeds-file', required=True, type=str, help='File path to prioritizer seeds file')\n parser.add_argument('--spammer-seeds-file', required=True, type=str, help='File path to spammer seeds file')\n parser.add_argument('--out', default='spam-results-{}.json'.format(str(int(time.time()))), type=str, help='Spam results JSON output')\n parser.add_argument('--avg-block-time', type=int, default=5, help='Average block time. Controls the time delay between every spam round and the one just after that')\n\n parser.add_argument('--passphrase', type=str, help='Network passphrase')\n parser.add_argument('--horizon', action='append',\n help='Horizon endpoint URL (use multiple --horizon flags for multiple addresses)')\n\n return parser.parse_args()", "def parser(cls, *args, **kwargs):\n\n parser = ArgumentParser(*args, **kwargs)\n parser.add_argument('-a', \"--address\",\n help=\"Force entry point address\", default=None)\n parser.add_argument('-b', \"--dumpblocs\", action=\"store_true\",\n help=\"Log disasm blocks\")\n parser.add_argument('-z', \"--singlestep\", action=\"store_true\",\n help=\"Log single step\")\n parser.add_argument('-d', \"--debugging\", action=\"store_true\",\n help=\"Debug shell\")\n parser.add_argument('-g', \"--gdbserver\", type=int,\n help=\"Listen on port @port\")\n parser.add_argument(\"-j\", \"--jitter\",\n help=\"Jitter engine. 
Possible values are: gcc (default), tcc, llvm, python\",\n default=\"gcc\")\n parser.add_argument(\n '-q', \"--quiet-function-calls\", action=\"store_true\",\n help=\"Don't log function calls\")\n parser.add_argument('-i', \"--dependencies\", action=\"store_true\",\n help=\"Load PE and its dependencies\")\n\n for base_cls in cls._classes_():\n base_cls.update_parser(parser)\n return parser", "def init_parser():\n parser = OptionParser()\n parser.add_option(\"-n\", \"--interactive\", action=\"store_true\", help=\"run in interactive (non-daemon) mode\")\n parser.add_option(\"-r\", \"--run\", action=\"store_true\", help=\"starts process identified by -app parameter\")\n parser.add_option(\"-k\", \"--kill\", action=\"store_true\", help=\"kill process identified by -app parameter\")\n parser.add_option(\"-a\", \"--app\", action=\"store\", help=\"application to start (process name)\")\n parser.add_option(\"-q\", \"--query\", action=\"store_true\", help=\"query application's state\")\n parser.add_option(\"-i\", \"--install_ve\", action=\"store_true\", help=\"install a virtualenv for the runtime to use\")\n parser.add_option(\"-s\", \"--shell\", action=\"store_true\", help=\"run an ipython shell within the virtualenv\")\n parser.add_option(\"-t\", \"--tests\", action=\"store_true\", help=\"run tests\")\n parser.add_option(\"-x\", \"--xunit\", action=\"store_true\", help=\"run tests with coverage and xunit output for Jenkins\")\n parser.add_option(\"-z\", \"--analyze\", action=\"store_true\", help=\"run pylint on project\")\n parser.add_option(\"-l\", \"--list\", action=\"store_true\", help=\"list available applications\")\n parser.add_option(\"-o\", \"--outfile\", action=\"store\", help=\"save results from a report to a file\")\n return parser", "def create_parser():\n parser = argparse.ArgumentParser(\n description=\"First example\",\n epilog=\"Batch 2017\")\n\n # script\n parser.add_argument('--script',\n required=True,\n action='store',\n dest='script',\n help=\"A script to execute\")\n\n parser.add_argument('--dataset',\n required=True,\n action='store',\n dest='dataset',\n help=\"A dataset to use\")\n#\n# parser.add_argument('--features',\n# required=True,\n# action='store',\n# dest='features',\n# help=\"Number of features\")\n return parser", "def parse_args():\n parser = ArgumentParser()\n parser.add_argument('-t', '--timer', action='store_true', \\\n help='Time the first random generation')\n parser.add_argument('-i', '--ibmq', default='', help='IBMQ token')\n parser.add_argument('-b', '--backend', default='', help='IBMQ backend')\n return parser.parse_args()", "def get_argparser():\r\n parser = ArgumentParser(description='Output a confusion matrix computed '\r\n 'over one or more true/pred .npz '\r\n 'files.')\r\n parser.add_argument(\"--true_pattern\", type=str,\r\n default=\"split*/predictions/test_data/dataset_1/files/*/true.npz\",\r\n help='Glob-like pattern to one or more .npz files '\r\n 'storing the true labels')\r\n parser.add_argument(\"--pred_pattern\", type=str,\r\n default=\"split*/predictions/test_data/dataset_1/files/*/pred.npz\",\r\n help='Glob-like pattern to one or more .npz files '\r\n 'storing the true labels')\r\n parser.add_argument(\"--normalized\", action=\"store_true\",\r\n help=\"Normalize the CM to show fraction of total trues\")\r\n parser.add_argument(\"--show_pairs\", action=\"store_true\",\r\n help=\"Show the paired files (for debugging)\")\r\n parser.add_argument(\"--group_non_rem\", action=\"store_true\",\r\n help=\"Group all non-rem stages (N1, N2, N3) 
into one.\")\r\n parser.add_argument(\"--round\", type=int, default=3,\r\n help=\"Round float numbers, only applicable \"\r\n \"with --normalized.\")\r\n parser.add_argument(\"--wake_trim_min\", type=int, required=False,\r\n help=\"Only evaluate on within wake_trim_min of wake \"\r\n \"before and after sleep, as determined by true \"\r\n \"labels\")\r\n parser.add_argument(\"--period_length_sec\", type=int, default=30,\r\n help=\"Used with --wake_trim_min to determine number of\"\r\n \" periods to trim\")\r\n parser.add_argument(\"--ignore_classes\", type=int, nargs=\"+\", default=None,\r\n help=\"Optional space separated list of class integers to ignore.\")\r\n return parser", "def makeParser():\n parser = argparse.ArgumentParser(\n description=(\n \"Print a JSON object containing reference to read \"\n \"distances extracted from a SAM file.\"\n )\n )\n\n parser.add_argument(\n \"--samFile\",\n action=\"append\",\n required=True,\n help=\"The SAM file(s) to load. May be repeated.\",\n )\n\n parser.add_argument(\n \"--minMatchingReads\",\n type=int,\n help=(\n \"The minimum number of reads that must match a reference for it \"\n \"to be included.\"\n ),\n )\n\n parser.add_argument(\n \"--scoreTag\",\n help=(\n \"The score tag to use for the alignment score. If not given, \"\n \"1 will be used to indicate that a read matched a reference \"\n \"(non-matches are not included). The default is no score tag, \"\n 'which is not that useful. A good choice is \"AS\", for the '\n \"alignment score, but that has to be present in the SAM file, \"\n \"which means that the aligner (bowtie2, bwa, etc. has to have \"\n \"produced such a tag.\"\n ),\n )\n\n parser.add_argument(\n \"--verbose\", action=\"store_true\", help=\"Print extra information.\"\n )\n\n return parser", "def create_arg_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-a', '--path_annots', type=str, required=False,\n help='path to folder with annotations',\n default='annotations')\n parser.add_argument('-i', '--path_dataset', type=str, required=False,\n help='path to folder with dataset (images)',\n default='dataset')\n parser.add_argument('-o', '--path_output', type=str, required=False,\n help='path to the output directory - visualisation',\n default='output')\n parser.add_argument('--consensus', type=str, required=False,\n help='method for consensus landmarks',\n choices=['mean', 'median'], default='mean')\n parser.add_argument('--visual', required=False, action='store_true',\n help='export co-annotation visualisation', default=False)\n parser.add_argument('--nb_jobs', type=int, required=False,\n help='number of processes in parallel',\n default=NB_THREADS)\n return parser", "def mujoco_arg_parser():\n parser = arg_parser()\n parser.add_argument('--env', help='environment ID', type=str, default='Reacher-v2')\n parser.add_argument('--seed', help='RNG seed', type=int, default=0)\n parser.add_argument('--num-timesteps', type=int, default=int(1e6))\n parser.add_argument('--play', default=False, action='store_true')\n return parser", "def atari_arg_parser():\n parser = arg_parser()\n parser.add_argument('--env', help='environment ID', default='BreakoutNoFrameskip-v4')\n parser.add_argument('--seed', help='RNG seed', type=int, default=0)\n parser.add_argument('--num-timesteps', type=int, default=int(10e6))\n return parser", "def build_argparser():\n parser = ArgumentParser()\n parser.add_argument(\"-m\", \"--model\", required=True, type=str,\n help=\"Path to an xml file with a trained model.\")\n 
parser.add_argument(\"-i\", \"--input\", required=True, type=str,\n help=\"Path to image or video file\")\n parser.add_argument(\"-l\", \"--cpu_extension\", required=False, type=str,\n default=None,\n help=\"MKLDNN (CPU)-targeted custom layers.\"\n \"Absolute path to a shared library with the\"\n \"kernels impl.\")\n parser.add_argument(\"-d\", \"--device\", type=str, default=\"CPU\",\n help=\"Specify the target device to infer on: \"\n \"CPU, GPU, FPGA or MYRIAD is acceptable. Sample \"\n \"will look for a suitable plugin for device \"\n \"specified (CPU by default)\")\n parser.add_argument(\"-pt\", \"--prob_threshold\", type=float, default=0.3,\n help=\"Probability threshold for detections filtering\"\n \"(0.3 by default)\")\n return parser", "def build_argparser():\n parser = ArgumentParser()\n parser.add_argument(\"-m\", \"--model\", required=True, type=str,\n help=\"Path to an xml file with a trained model.\")\n parser.add_argument(\"-i\", \"--input\", required=True, type=str,\n help=\"Path to image or video file\")\n parser.add_argument(\"-l\", \"--cpu_extension\", required=False, type=str,\n default=None,\n help=\"MKLDNN (CPU)-targeted custom layers.\"\n \"Absolute path to a shared library with the\"\n \"kernels impl.\")\n parser.add_argument(\"-d\", \"--device\", type=str, default=\"CPU\",\n help=\"Specify the target device to infer on: \"\n \"CPU, GPU, FPGA or MYRIAD is acceptable. Sample \"\n \"will look for a suitable plugin for device \"\n \"specified (CPU by default)\")\n parser.add_argument(\"-pt\", \"--prob_threshold\", type=float, default=0.5,\n help=\"Probability threshold for detections filtering\"\n \"(0.5 by default)\")\n return parser", "def build_argparser():\n parser = ArgumentParser()\n parser.add_argument(\"-m\", \"--model\", required=True, type=str,\n help=\"Path to an xml file with a trained model.\")\n parser.add_argument(\"-i\", \"--input\", required=True, type=str,\n help=\"Path to image or video file\")\n parser.add_argument(\"-l\", \"--cpu_extension\", required=False, type=str,\n default=None,\n help=\"MKLDNN (CPU)-targeted custom layers.\"\n \"Absolute path to a shared library with the\"\n \"kernels impl.\")\n parser.add_argument(\"-d\", \"--device\", type=str, default=\"CPU\",\n help=\"Specify the target device to infer on: \"\n \"CPU, GPU, FPGA or MYRIAD is acceptable. Sample \"\n \"will look for a suitable plugin for device \"\n \"specified (CPU by default)\")\n parser.add_argument(\"-pt\", \"--prob_threshold\", type=float, default=0.5,\n help=\"Probability threshold for detections filtering\"\n \"(0.5 by default)\")\n return parser", "def build_argparser():\n parser = ArgumentParser()\n parser.add_argument(\"-m\", \"--model\", required=True, type=str,\n help=\"Path to an xml file with a trained model.\")\n parser.add_argument(\"-i\", \"--input\", required=True, type=str,\n help=\"Path to image or video file\")\n parser.add_argument(\"-l\", \"--cpu_extension\", required=False, type=str,\n default=None,\n help=\"MKLDNN (CPU)-targeted custom layers.\"\n \"Absolute path to a shared library with the\"\n \"kernels impl.\")\n parser.add_argument(\"-d\", \"--device\", type=str, default=\"CPU\",\n help=\"Specify the target device to infer on: \"\n \"CPU, GPU, FPGA or MYRIAD is acceptable. 
Sample \"\n \"will look for a suitable plugin for device \"\n \"specified (CPU by default)\")\n parser.add_argument(\"-pt\", \"--prob_threshold\", type=float, default=0.5,\n help=\"Probability threshold for detections filtering\"\n \"(0.5 by default)\")\n return parser", "def parse_args():\n parser = argparse.ArgumentParser(description=\"Bandits algorithms on a click-through \"\n \"rate dataset.\")\n parser.add_argument('--plot', action='store_true')\n return parser.parse_args()", "def set_parser():\n\n print('\\n\\nLoading Options and Configurations\\n' + 72 * '~' + '\\n')\n parser = ArgumentParser( \\\n description=('''\\n\nUsed to validate the TELEMAC system against a benchmark of test cases for\na certain rank, and a certain tag'''))\n\n parser = add_runcode_argument(parser)\n parser.add_argument( \\\n \"-b\", \"--bypass\", action=\"store_true\", dest=\"bypass\", default=False,\n help=\"will bypass execution failures and try to carry on \"\\\n \"(final report at the end)\")\n # Combine with all filters above, \"rank\" now controls everything\n # and Jenkins can control \"rank\"\n parser.add_argument( \\\n \"-k\", \"--rank\", dest=\"rank\", type=int, default=4,\n help=\"specify the ranks to be validated all rank lower or equal to \"\n \"the value will be run\")\n parser.add_argument( \\\n \"--tags\", dest=\"tags\", default='all',\n help=\\\n \"specify tags (; separated) to run \"\\\n \" '-tag' will do the opposite and \"\\\n \"tag1+tag2 will run cases that has both tag1 and tag2), \"\\\n \"default is all of them\")\n parser.add_argument( \\\n \"--valrootdir\", dest=\"val_root\", default='',\n help=\"specify the directory in which to search the validation cases, \"\\\n \"default is taken from config file\")\n parser.add_argument( \\\n \"--vnv-pre\", action=\"store_true\", dest=\"vnv_pre\", default=False,\n help=\"Only do pre-treatment\")\n parser.add_argument( \\\n \"--vnv-run\", action=\"store_true\", dest=\"vnv_run\", default=False,\n help=\"Only do execution for each study\")\n parser.add_argument( \\\n \"--vnv-check\", action=\"store_true\", dest=\"vnv_check\", default=False,\n help=\"Only do check of results (epsilons)\")\n parser.add_argument( \\\n \"--vnv-post\", action=\"store_true\", dest=\"vnv_post\", default=False,\n help=\"Only do post-treatment\")\n parser.add_argument( \\\n \"--report-name\", dest=\"report_name\", default='',\n help=\"will create a csv containing information on the validation \"\\\n \"such as execution time, rank, if it passed...\")\n parser.add_argument( \\\n \"--clean\", action=\"store_true\", dest=\"cleanup\", default=False,\n help=\"will erase all object, executable, result files \"\\\n \"from subfolders for the actual configuration\")\n parser.add_argument( \\\n \"--full-clean\", action=\"store_true\", dest=\"full_cleanup\", default=False,\n help=\"will erase all vnv study folders regarding of configurations\")\n\n # Options for notebook\n parser.add_argument(\n \"--notebook\",\n dest=\"notebook\",\n action=\"store_true\", default=False,\n help=\"Run validation of notebook\")\n parser.add_argument(\n \"--notebook-timeout\",\n dest=\"nb_timeout\", type=int, default=60000,\n help=\"Time after whihc the notebook will be killed if still running\")\n parser.add_argument(\n \"--notebook-update\",\n dest=\"nb_update\",\n action=\"store_true\", default=False,\n help=\"Update notebook file with the runned one\")\n parser.add_argument(\n \"--verbose\",\n dest=\"verbose\",\n action=\"store_true\", default=False,\n help=\"More verbose validation\")\n\n # Options 
for api\n parser.add_argument(\n \"--api\",\n dest=\"api\",\n action=\"store_true\", default=False,\n help=\"Run validation of api\")\n\n parser.add_argument(\"args\", metavar='Python file(s)', nargs='*')\n options = parser.parse_args()\n\n # Conversion of options.tags (replacing all by list) and checking that the\n # value is valid\n # Removing quotes\n tmp_tag = options.tags.strip(\"'\\\"\")\n options.tags = tmp_tag\n # Checking that tags are valid\n for tag in options.tags.split(';'):\n if '+' in tag:\n for and_tag in tag.split('+'):\n # Removing - if in tag\n ttag = and_tag[1:] if and_tag[0] == '-' else and_tag\n if ttag not in TAGS:\n raise TelemacException(\\\n \"Unknow tag: {tag}\\nTags available: {tags}\"\\\n .format(tag=ttag, tags=';'.join(TAGS)))\n else:\n if tag == 'all':\n continue\n # Removing - if in tag\n ttag = tag[1:] if tag[0] == '-' else tag\n if ttag not in TAGS:\n raise TelemacException(\\\n \"Unknow tag: {tag}\\nTags available: {tags}\"\\\n .format(tag=ttag, tags=';'.join(TAGS)))\n\n # Replacing all by list of tags\n if 'all' in options.tags.split(';'):\n options.tags = options.tags.replace('all', ';'.join(TAGS))\n\n # If pre, run, post are all false switching them to true\n if not(options.vnv_pre or options.vnv_run or\n options.vnv_check or options.vnv_post):\n options.vnv_pre = True\n options.vnv_run = True\n options.vnv_check = True\n options.vnv_post = True\n\n return options", "def get_base_argument_parser(\n **kwargs\n) -> ArgumentParser:\n\n parser = ArgumentParser(\n allow_abbrev=False,\n add_help=False,\n **kwargs\n )\n\n parser.add_argument(\n '--help',\n action='store_true',\n help='Pass this flag to print usage and argument descriptions.'\n )\n\n parser.add_argument(\n '--log',\n choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],\n help='Logging level.'\n )\n\n return parser", "def create_basic_parser(name=''):\n # SEE: https://docs.python.org/3/library/argparse.html\n parser = argparse.ArgumentParser('Benchmark on Image Registration - %s' % name)\n parser.add_argument('-n', '--name', type=str, required=False, default=None, help='custom experiment name')\n parser.add_argument('-t', '--path_table', type=str, required=True, help='path to the csv cover file')\n parser.add_argument(\n '-d',\n '--path_dataset',\n type=str,\n required=False,\n default=None,\n help='path to the dataset location, if missing in table'\n )\n parser.add_argument('-o', '--path_out', type=str, required=True, help='path to the output directory')\n parser.add_argument(\n '--unique', dest='unique', action='store_true', help='whether each experiment have unique time stamp'\n )\n parser.add_argument('--visual', dest='visual', action='store_true', help='whether visualise partial results')\n parser.add_argument(\n '-pproc',\n '--preprocessing',\n type=str,\n required=False,\n nargs='+',\n help='use some image pre-processing, the other matter',\n choices=['gray'] + ['matching-%s' % clr for clr in CONVERT_RGB]\n )\n # parser.add_argument('--lock_expt', dest='lock_thread', action='store_true',\n # help='whether lock to run experiment in single thread')\n parser.add_argument('--run_comp_benchmark', action='store_true', help='run computation benchmark on the end')\n parser.add_argument(\n '--nb_workers', type=int, required=False, default=1, help='number of registration running in parallel'\n )\n return parser", "def create_cli_parser():\n\n description = DESCRIPTION(os.path.basename(__file__))\n parser = argparse.ArgumentParser(description=description,\n 
formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument('-j', '--json', action='store', default='diff_summary.json',\n type=str, help='JSON file in which to write diff summary.')\n parser.add_argument('-n', '--num-splits', action='store', default=1,\n type=int, help='Number of horizontal splits (LaTeX only).')\n parser.add_argument('--without-preamble', action='store_true',\n dest='without_preamble', default=False,\n help='Write out only a LaTeX table, for inclusion in a\\nlarger document.')\n parser.add_argument('--vm', action='append', nargs=2, dest='vm', default=[],\n help='Compare one VM against another. \\nRequires two '\n 'VM names as arguments. By default, the\\ndiffer '\n 'will compare all benchmarks / VMs which appear\\n'\n 'in both input files. Users should be aware that this\\n'\n 'option produces a summary file with some special JSON\\n'\n 'keys. This means that summary files (usually\\ndiff_summary.json) '\n 'usually not be suitable for\\ngenerating generic diff tables, or '\n 'tables with different\\n--vm options. In this case, users should '\n 'regenerate the\\nsummary file with the original data and the -r option.')\n outputs = parser.add_mutually_exclusive_group(required=True)\n outputs.add_argument('--tex', action='store', type=str,\n help='LaTeX file in which to write diff summary.')\n outputs.add_argument('--html', action='store', type=str,\n help='HTML file in which to write diff summary.')\n inputs = parser.add_mutually_exclusive_group(required=True)\n inputs.add_argument('-s', '--input-summary', action='store', default=None,\n type=str, help='Read summary data from JSON file rather than '\n 'generating\\nfrom two original results files.')\n inputs.add_argument('-r', '--input-results', nargs=2, action='append', default=[], type=str,\n help='Exactly two Krun result files (with outliers and\\nchangepoints).')\n return parser", "def build_parser ():\n\n parser = argparse.ArgumentParser (description = __doc__)\n\n parser.add_argument (\n '-v', '--verbose', dest='verbose', action='count',\n help='increase output verbosity', default=0\n )\n parser.add_argument (\n '-l', '--live', dest='get_live_data', action='store_true',\n help='get live data from OSM database',\n )\n parser.add_argument (\n '-e', '--edit', action='store_true',\n help='edit the OSM database',\n )\n parser.add_argument (\n '-u', '--user', dest='my_edits', action='store_true',\n help='only report about my edits',\n )\n parser.add_argument (\n '--min-length', dest=\"min_length\", type=float, default=1000.0,\n help='way must be longer than this to get a ref (in m) (default=1000)',\n )\n parser.add_argument (\n '--batch-size', dest=\"batch_size\", type=int, default=10,\n help='apply OSM edits in changesets of this size (default=10)',\n )\n return parser", "def build_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-r', '--reference', required=True, help=\"Reference Genome URL\")\n parser.add_argument('-n', '--normal', required=True, help='Normal BAM URL. Format: UUID.normal.bam')\n parser.add_argument('-t', '--tumor', required=True, help='Tumor BAM URL. Format: UUID.tumor.bam')\n parser.add_argument('-d', '--dbsnp', required=True, help='dbsnp_132_b37.leftAligned.vcf URL')\n parser.add_argument('-c', '--cosmic', required=True, help='b37_cosmic_v54_120711.vcf URL')\n parser.add_argument('-u', '--mutect', required=True, help='Mutect.jar')\n parser.add_argument('-w', '--work_dir', required=True, help='Where you wanna work from? 
(full path please)')\n\n return parser", "def options():\n parser = ArgumentParser()\n logging = parser.add_argument_group(\"log\")\n logging.add_argument(\n \"--log\",\n dest=\"loglevel\",\n default=\"WARNING\",\n choices=[\"WARNING\", \"INFO\", \"DEBUG\", \"ERROR\"],\n help=\"Set the log level\",\n )\n monitoring = parser.add_argument_group(\"monitoring\")\n monitoring.add_argument(\n \"--monitoring\", action=\"store_true\", help=\"Set the monitoring\"\n )\n mpi = parser.add_argument_group(\"mpi splitting\")\n mpi.add_argument(\n \"-npx\",\n dest=\"npx\",\n default=1,\n type=int,\n help=\"Set the number of processes in x direction\",\n )\n mpi.add_argument(\n \"-npy\",\n dest=\"npy\",\n default=1,\n type=int,\n help=\"Set the number of processes in y direction\",\n )\n mpi.add_argument(\n \"-npz\",\n dest=\"npz\",\n default=1,\n type=int,\n help=\"Set the number of processes in z direction\",\n )\n args, _ = parser.parse_known_args()\n return args", "def setup_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(description='evaluate dynamic shielding with water_tank benchmarks')\n\n parser.add_argument('--steps', type=int, default=int(2e5),\n help='number of steps that each environment is run.')\n parser.add_argument('--learning-rate', type=float, default=1e-3,\n help='learning rate')\n parser.add_argument('--shield', type=str, default='pre-adaptive',\n help='the shield to be used [pre-adaptive (default) | pre-dynamic | safe-padding | '\n 'post-dynamic | no]')\n parser.add_argument('--shield-life', type=int, default=100,\n help='frequency of shield reconstruction in terms of episodes.')\n parser.add_argument('--depths', nargs='+', default=[1, 3, 5, 7],\n help='a list of min-depths for dynamic shield (usage: --depths 0 1 3)')\n parser.add_argument('--penalties', nargs='+', default=[0.0, 1.0, 10.0, 100.0],\n help='a list of penalties that it is used in no shield (usage: --penalties 0.0 1.0 100.0)')\n return parser", "def parse_args():\n parser = OptionParser()\n parser.add_option('--data-file', '-f', default='train_data.hdf5',\n help=\"The path to the data file\")\n parser.add_option('--runs-per-epoch', '-r', type='int',\n help=\"The number of runs per epoch (train samples count)\")\n parser.add_option('--avg-window-size', '-w', default='1', type='int',\n help=\"The window size for moving average\")\n\n (options, args) = parser.parse_args()\n return options", "def get_parser():\n\n parser = argparse.ArgumentParser(description='Training a hybrid control barrier function')\n\n # Dataset paths\n parser.add_argument('--train-data-path', required=False,\n help='Path to training data pickle file')\n parser.add_argument('--test-data-path', required=False,\n help='Path to test data pickle file')\n parser.add_argument('--n-train-rollouts', type=int, default=100,\n help='Number of rollouts to use for training')\n parser.add_argument('--results-dir', type=str, default='./results',\n help='Path to save all outputs')\n\n # optimization settings and training parameters\n parser.add_argument('--neural-net-dims', type=int, nargs='*',\n help='Dimensions of neural network to train')\n parser.add_argument('--optimizer', type=str, default='adam', choices=['sgd', 'adam'], \n help='Optimization algorithm to use')\n parser.add_argument('--learning-rate', type=float, default=0.005, \n help='Learning rate for optimizer')\n parser.add_argument('--momentum', default=0.9, type=float, \n help='Momentum for SGD')\n parser.add_argument('--n-epochs', type=int, default=20000,\n help='Number of epochs 
for training the HCBF')\n\n # hybrid CBF hyperparameters\n parser.add_argument('--lam-safe', type=float, default=5.0,\n help='Lagrange multiplier for safe states loss')\n parser.add_argument('--lam-unsafe', type=float, default=5.0,\n help='Lagrange multiplier for unsafe states loss')\n parser.add_argument('--lam-cnt', type=float, default=0.5,\n help='Lagrange multiplier for continuous states loss')\n parser.add_argument('--lam-dis', type=float, default=0.5,\n help='Lagrange multiplier for discrete states loss')\n parser.add_argument('--lam-grad', type=float, default=0.01,\n help='Lagrange multiplier for penalty on gradient of h(x)')\n parser.add_argument('--lam-param', type=float, default=0.01,\n help='Lagrange multiplier for penalty on the size of the weights of h(x)')\n parser.add_argument('--gam-safe', type=float, default=0.3,\n help='Margin value for safe loss')\n parser.add_argument('--gam-unsafe', type=float, default=0.3,\n help='Margin value for safe loss')\n parser.add_argument('--gam-cnt', type=float, default=0.05,\n help='Margin value for continuous loss')\n parser.add_argument('--gam-dis', type=float, default=0.05,\n help='Margin value for discrete loss')\n\n # boundary sampling hyperparameters\n parser.add_argument('--min-num-nbrs', type=int, default=200,\n help='Minimum numbers of neighbors for neighbor sampling algorithm')\n parser.add_argument('--nbr-thresh', type=float, default=0.08,\n help='Neighbor threshold for neighbor sampling algorithm')\n\n # compass gait parameters \n # (horizon, dt, and fix_left should match those used to collect the training dataset)\n parser.add_argument('--horizon', type=int, default=750,\n help='Number of steps for each rollout')\n parser.add_argument('--dt', type=float, default=0.01, \n help='Time interval between discrete steps')\n parser.add_argument('--fix-left', action='store_true',\n help='Fix left leg in all initial conditions')\n parser.add_argument('--success-n-steps', type=int, default=5, \n help='Number of steps taken to entail a success')\n\n # other\n parser.add_argument('--report-int', type=int, default=100,\n help='Print frequency (per epoch) for training')\n parser.add_argument('--reload', action='store_true',\n help='Reloads neural network from file if argument is provided.')\n parser.add_argument('--reload-path', type=str, \n help='Path to saved neural network parameters file (.npy)')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n # # paths to various directories\n # parser.add_argument('--save-path', type=str, \n # help='Path for saving outputs')\n # parser.add_argument('--logdir', default='', type=str,\n # help='Directory for tensorboard logs')\n # parser.add_argument('--model-paths', type=str, nargs='*',\n # help=\"Path for model of natural variation\")\n \n # # optimization settings and training parameters\n # parser.add_argument('--half-prec', action='store_true', \n # help='Run model in half-precision mode using apex')\n # parser.add_argument('--apex-opt-level', default='O1', type=str, \n # help='opt_level for Apex amp initialization')\n # parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, metavar='W', \n # help='weight decay (default: 1e-4)')\n # parser.add_argument('--init-bn0', action='store_true', \n # help='Intialize running batch norm mean to 0')\n # parser.add_argument('--no-bn-wd', action='store_true', \n # help='Remove batch norm from weight decay')\n # parser.add_argument('--momentum', default=0.9, type=float, metavar='M', \n # help='Momentum for SGD')\n # parser.add_argument('--data-size', type=int, 
default=224, \n # help=\"Size of each image\")\n # parser.add_argument('--batch-size', type=int, default=256, \n # help='Training/validation batch size')\n # parser.add_argument('--delta-dim', type=int, default=2, \n # help=\"dimension of nuisance latent space\")\n\n # # architecture\n # parser.add_argument('--architecture', default='resnet50', type=str, \n # help='Architecture for classifier')\n # parser.add_argument('--pretrained', action='store_true', \n # help='Use pretrained model (only available for torchvision.models)')\n # parser.add_argument('--num-classes', default=1000, type=int, \n # help='Number of classes in datset')\n \n # # dataset\n # parser.add_argument('--dataset', required=True, type=str, choices=['imagenet', 'svhn', 'gtsrb', 'cure-tsr'],\n # help='Dataset to use for training/testing classifier.')\n # parser.add_argument('--source-of-nat-var', type=str, \n # help='Source of natural variation')\n\n # # other parameters\n # parser.add_argument('--print-freq', '-p', default=5, type=int, metavar='N', \n # help='log/print every this many steps (default: 5)')\n # parser.add_argument('--resume', default='', type=str, metavar='PATH',\n # help='path to latest checkpoint (default: none)')\n # parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n # help='evaluate model on validation set')\n # parser.add_argument('--short-epoch', action='store_true', \n # help='make epochs short (for debugging)')\n # parser.add_argument('--setup-verbose', action='store_true', \n # help='Print setup messages to console')\n # parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',\n # help='number of data loading workers (default: 8)')\n # parser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n # help='manual epoch number (useful on restarts)')\n\n args = parser.parse_args()\n\n return args", "def initCmdLineParser():\n\n # Init parser and all general flags\n usage = \"usage: %prog [options] [--help]\"\n parser = OptionParser(usage=usage, version=\"0.1\")\n\n parser.add_option(\"-d\", \"--daemon\", action=\"store_true\", default=False, help=\"daemon mode\")\n parser.add_option(\"-c\", \"--config\", help=\"install config file\", default = 'test.conf')\n parser.add_option(\"-D\", \"--debug\", action=\"store_true\", help=\"debug mode\", default = False)\n\n parser.add_option(\"-a\", \"--add\", action=\"store_true\", help=\"add node to cluster\", default = False)\n parser.add_option(\"-p\", \"--port\", help= \"http server port\", default = '8999')\n\n\n return parser", "def make_cli_parser(self):\n super(SaArgParser, self).make_cli_parser()\n self.cli_parser.add_option('--steps', type='int',\n default=mcmc.defaults.NUM_STEPS,\n help=(\"the number of steps to Anneal. \"\n\t\t\t\t\"[default: %default]\")\n )\n self.cli_parser.add_option('--temperature', type='int',\n default=mcmc.defaults.TEMPERATURE,\n help=(\"the starting temperature to anneal from. 
\"\n \"[default: %default]\")\n )\n self.cli_parser.add_option('--end_temperature', type='int',\n default=mcmc.defaults.END_TEMPERATURE,\n help=(\"the temperature to end annealing.\"\n \"[default: %default]\")\n )\n self.cli_parser.add_option('--activity-threshold',\n type='float',\n default=mcmc.defaults.ACTIVITY_THRESHOLD,\n help=(\"set the (differential) expression threshold at \"\n \"which a gene is considered active [default: \"\n \"%default=-log10(0.05)]\")\n )\n self.cli_parser.add_option('--free-parameters',\n action='store_true',\n help=(\"parameters will be adjusted randomly, rather \"\n \"than incrementally\")\n )\n self.cli_parser.add_option('--disable-swaps', action='store_true',\n help=(\"disables swapping links as an option for \"\n \"transitions\")\n )\n self.cli_parser.add_option('--transition-ratio', type='float',\n default=0.9,\n help=(\"The target ratio of proposed link transitions \"\n \"to proposed parameter transitions [default: \"\n \"%default]\"\n )\n )\n self.cli_parser.add_option('--parameters-outfile',\n default=mcmc.defaults.PARAMETERS_OUTFILE,\n help=(\"the file to which the parameters results should \"\n \"be written [default: %default]\")\n )\n self.cli_parser.add_option('--transitions-outfile',\n default=mcmc.defaults.TRANSITIONS_OUTTFILE,\n help=(\"the file to which the transitions data should \"\n \"be written [default: %default]\")\n )\n self.cli_parser.add_option('--detailed-transitions',\n action='store_true',\n help=(\"Transitions file includes full information about \"\n \"each step's state.\")\n )\n self.cli_parser.add_option('--bzip2', action='store_true',\n help=\"compress transitions file using bzip2\"\n )", "def setup_args() -> argparse.ArgumentParser:\n main_parser = argparse.ArgumentParser(prog=\"gh\")\n subparsers = main_parser.add_subparsers(dest=\"subparser\")\n command_parser = subparsers.add_parser(\"commands\", help=\"Runs a command\")\n command_parser.add_argument(\n \"choice\",\n help=\"The chosen command to run\",\n choices=gh.commands.OPTIONS.keys(),\n )\n analytics_parser = subparsers.add_parser(\"analytics\", help=\"Runs an analysis\")\n analytics_parser.add_argument(\n \"choice\",\n help=\"The chosen analysis to run\",\n choices=gh.analytics.OPTIONS.keys(),\n )\n return main_parser", "def common_arg_parser():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--log_dir', type=str, default='logs/', help='root folder to save experimental logs.')\n parser.add_argument('--game_name', type=str, default='', help='run one game only.')\n parser.add_argument('--mode', type=str, help='specify which code to run.')\n parser.add_argument('--gpu_ids', default=[0, 1, 2, 3], nargs='+', help='gpu ids to run different games')\n parser.add_argument('--game_groups', default=[0, 1, 2, 3], nargs='+', help='game groups to run')\n parser.add_argument('--seed', type=int, default='0', help='random seeds for those games')\n parser.add_argument('--routine_ablation', type=str, default=\"\", help='Name of the ablated routines.')\n return parser", "def get_argparser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--fast', action='store_true', help='Run on extremely reduced dataset')\n parser.add_argument('--seed', type=int, default=42, help='Random seed')\n parser.add_argument('--split-seed', type=int, default=1234, help='Random seed for train/val split')\n parser.add_argument('-a', '--augmentation', default='none', help='Augmentation used. 
Possible values: hard, medium, light, safe, none')\n parser.add_argument('-abn', '--abn', default='default', help='Use of activate + batch_norm block. Values: default, inplace, inplace_sync')\n parser.add_argument('-b', '--batch-size', type=int, default=32, help='Batch Size during training, e.g. -b 64')\n parser.add_argument('-bm', '--border-mode', type=str, default='reflect', help='Border mode. Either constant|reflect')\n parser.add_argument('-d', '--dataset', type=str, default='image_only', help='image_only, image_depth, image_cumsum, image_depth_cumsum')\n parser.add_argument('-de', '--drop-empty', action='store_true')\n parser.add_argument('-df', '--drop-few', default=None, type=int)\n parser.add_argument('-dv', '--drop-vstrips', action='store_true')\n parser.add_argument('-e', '--epochs', type=int, default=150, help='Epoch to run')\n parser.add_argument('-es', '--early-stopping', type=int, default=None, help='Maximum number of epochs without improvement')\n parser.add_argument('-f', '--fold', default=None, type=int, help='Fold to train')\n parser.add_argument('-fe', '--freeze-encoder', type=int, default=0, help='Freeze encoder parameters for N epochs')\n parser.add_argument('-fm', '--fix-masks', action='store_true')\n parser.add_argument('-ft', '--fine-tune', action='store_true')\n parser.add_argument('-l', '--loss', type=str, default='bce', help='Loss (lovasz, bce_iou)')\n parser.add_argument('-lr', '--learning-rate', type=float, default=1e-3, help='Initial learning rate')\n parser.add_argument('-lrs', '--lr-scheduler', default=None, help='LR scheduler')\n parser.add_argument('-m', '--model', required=True, type=str, help='Name of the model')\n parser.add_argument('-multi-gpu', '--multi-gpu', action='store_true')\n parser.add_argument('-nc', '--num-classes', default=1, type=int, help='Run on extremely reduced dataset')\n parser.add_argument('-nd', '--no-dropout', action='store_true', help='Disable dropout (if model has it)')\n parser.add_argument('-npt', '--no-pretrain', action='store_true', help='Disables use of pretrain weights for encoders')\n parser.add_argument('-o', '--optimizer', default='Adam', help='Name of the optimizer')\n parser.add_argument('-p', '--prepare', type=str, default='128', help='Possible tile preparations (128, 128pad, 224, 224pad, 256, 256pad)')\n parser.add_argument('-r', '--resume', type=str, default=None, help='Checkpoint filename to resume')\n parser.add_argument('-re', '--restart-every', type=int, default=-1, help='Restart optimizer every N epochs')\n parser.add_argument('-s', '--stratify', default=None, type=str, help='Stratification class. 
One of: coverage, depth')\n parser.add_argument('-tm', '--target-metric', type=str, default='val_lb', help='Target metric to use for storing snapshots')\n parser.add_argument('-w', '--workers', default=0, type=int, help='Num workers')\n parser.add_argument('-wd', '--weight-decay', type=float, default=0, help='L2 weight decay')\n parser.add_argument('-x', '--experiment', type=str, help='Name of the experiment')\n\n return parser", "def build_parser():\n parser = argparse.ArgumentParser(description='The classic FizzBuzz game in programmatic form.', add_help=False)\n parser.add_argument('-h', '--help', default=argparse.SUPPRESS, action='help',\n help='Show this help message and exit.')\n parser.add_argument('-s', '--start', default=1, type=int, action='store', metavar='START',\n help='The number to start FizzBuzzing at (inclusive).')\n parser.add_argument('stop', type=int, action='store', metavar='STOP',\n help='The number to end FizzBuzzing at (exclusive).')\n return parser", "def parse_commandline():\n parser = optparse.OptionParser()\n\n parser.add_option(\"-l\",\"--lamp\", default=100, type=int)\n parser.add_option(\"-c\",\"--doCompile\", action=\"store_true\", default=False)\n parser.add_option(\"--doLamp\", action=\"store_true\", default=False)\n\n opts, args = parser.parse_args()\n\n return opts", "def argument_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"source\",\n type=argparse.FileType(\"r\"),\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"store_true\",\n help=\"be verbose in output\",\n )\n parser.add_argument(\n \"-p\",\n \"--permissive\",\n action=\"store_true\",\n help=\"allow permissive validation (just checking presence)\",\n )\n return parser", "def parse_options() -> Namespace:\n\n opt_parser = OptionParser(\n \"liftoff\",\n [\n \"script\",\n \"config_path\",\n \"procs_no\",\n \"gpus\",\n \"per_gpu\",\n \"no_detach\",\n \"verbose\",\n \"copy_to_clipboard\",\n \"time_limit\", # This should be removed in favour of start_by\n \"start_by\",\n \"end_by\",\n \"optimize\",\n \"args\",\n \"filters\",\n \"results_path\",\n \"name\",\n \"max_runs\",\n \"shuffle\",\n ],\n )\n return opt_parser.parse_args()", "def make_arguments_parser():\n parser = ArgumentParser(description=__doc__, epilog=\"\"\"CSS-HTML-JS-Minify:\n Takes a file or folder full path string and process all CSS/HTML/JS found.\n If argument is not file/folder will fail. Check Updates works on Python3.\n Std-In to Std-Out is deprecated since it may fail with unicode characters.\n SHA1 HEX-Digest 11 Chars Hash on Filenames is used for Server Cache.\n CSS Properties are Alpha-Sorted, to help spot cloned ones, Selectors not.\n Watch works for whole folders, with minimum of ~60 Secs between runs.\"\"\")\n parser.add_argument('--version', action='version', version=__version__)\n parser.add_argument('fullpath', metavar='fullpath', type=str,\n help='Full path to local file or folder.')\n parser.add_argument('--wrap', action='store_true',\n help=\"Wrap output to ~80 chars per line, CSS only.\")\n parser.add_argument('--prefix', type=str,\n help=\"Prefix string to prepend on output filenames.\")\n parser.add_argument('--timestamp', action='store_true',\n help=\"Add a Time Stamp on all CSS/JS output files.\")\n parser.add_argument('--quiet', action='store_true', help=\"Quiet, Silent.\")\n parser.add_argument('--obfuscate', action='store_true',\n help=\"Obfuscate Javascript. JS only. 
(Recommended).\")\n parser.add_argument('--checkupdates', action='store_true',\n help=\"Check for updates from internet while running.\")\n parser.add_argument('--tests', action='store_true', help=\"Run Unit Tests.\")\n parser.add_argument('--hash', action='store_true',\n help=\"Add SHA1 HEX-Digest 11chars Hash to Filenames.\")\n parser.add_argument('--gzip', action='store_true',\n help=\"GZIP Minified files as '*.gz', CSS/JS only.\")\n parser.add_argument('--sort', action='store_true',\n help=\"Alphabetically Sort CSS Properties, CSS only.\")\n parser.add_argument('--comments', action='store_true',\n help=\"Keep comments, CSS/HTML only (Not Recommended)\")\n parser.add_argument('--overwrite', action='store_true',\n help=\"Force overwrite all in-place (Not Recommended)\")\n parser.add_argument('--after', type=str,\n help=\"Command to execute after run (Experimental).\")\n parser.add_argument('--before', type=str,\n help=\"Command to execute before run (Experimental).\")\n parser.add_argument('--watch', action='store_true', help=\"Watch changes.\")\n parser.add_argument('--multiple', action='store_true',\n help=\"Allow Multiple instances (Not Recommended).\")\n parser.add_argument('--_42', action='store_true')\n global args\n args = parser.parse_args()", "def parse_command_line():\n parser = argparse.ArgumentParser()\n\n # Optional Argument\n parser.add_argument('-l', '--length', metavar='length', type=float, default=2, help='length (meter)')\n parser.add_argument('-k', '--conductivity', metavar='conductivity', type=float, default=0.5, help='constant thermal conductivity (W/m.K)')\n parser.add_argument('-q', '--heatgeneration', metavar='heatgeneration', type=float, default=1000, help='uniform heat generation (kW/m^3)')\n parser.add_argument('-TA', '--tempA', metavar='tempA', type=int, default=100, help='temperature at A (Celcius)')\n parser.add_argument('-TB', '--tempB', metavar='tempB', type=int, default=200, help='temperature at A (Celcius)')\n parser.add_argument('-n', '--nodes', metavar='nodes', type=int, default=5, help='nodes (positive integer)')\n parser.add_argument('-A', '--area', metavar='area', type=float, default=1, help='area (m^2)')\n parser.add_argument('-nf', '--nofigure', action='store_true', help='disable figure')\n parser.add_argument('-nd', '--nodetail', action='store_true', help='disable detail')\n return parser.parse_args()", "def cmdline_parser():\n parser = argparse.ArgumentParser(description=\"\"\" \"\"\")\n parser.add_argument(\"-g\", \"--gta\",\n help=\"\"\"gta sequences\"\"\",\n dest=\"gta\",\n required=True)\n return parser", "def parse_arguments():\n parser = argparse.ArgumentParser(prog='AdapterRunner', description='Adapter Runner Application')\n parser.add_argument('-a', '--application', action='store', dest='app_name', help='Application Name',\n metavar='<application_name>')\n parser.add_argument('-fi', '--fetch_interval', action='store', dest='fetch_stats_interval', help='Fetch Stats Interval',\n metavar='<fetch_interval in seconds>')\n return parser.parse_args()", "def parseArguments():\n # Create argument parser\n parser = argparse.ArgumentParser()\n\n # Optional arguments\n parser.add_argument(\"-t\", \"--test\", help=\"Optionally test algorithm on subsample of the data. 
Set to 1 for testing\", type=int, default=0)\n\n parser.add_argument(\"--cores\", help=\"Optimized code for a server with a lot of RAM, set to the number of available cores\", type=int, default=40)\n\n\n # Print version\n parser.add_argument(\"--version\", action=\"version\", version='%(prog)s - Version 2.0') #version 1.0 is for the observations in June 2018\n #version 1.1 contains the optimizations made after the june observations (mainly the switch to stackmags)\n #version 1.2 changed sim class to NOT include the list of failed candidates (not qsos)\n #... copied changes made to crossval version\n #version 1.5 added check for duplicate quasars and remove them\n #version 1.6 new simulated quasars (december)\n ##-------------------\n #version 2.0: combined training of classifier and regressor, streamlined input\n #version 2.1: Tryied to updates excluded area to a little more than stripe 82 but decided not to keep it, so no change\n\n # Parse arguments\n args = parser.parse_args()\n\n return args", "def parseArguments():\n parser = argparse.ArgumentParser(description='Tool run benchmarks and query database')\n parser.add_argument('--version', action=\"store_true\", dest=\"version\", default=False, help=\"Print version\")\n parser.add_argument(\"--query\", \"-q\", action=\"store_true\", dest=\"queryDataBase\", default=False, help=\"Query Data Base\")\n parser.add_argument(\"--performance\", \"-p\", action=\"store_true\", dest=\"queryPerformance\", default=False, help=\"Query Data Base - Performance Metrics\")\n parser.add_argument(\"--run\", \"-r\", action=\"store_true\", dest=\"runBenchmarks\", default=False, help=\"Run Benchmarks and store results in the DB\")\n args = parser.parse_args()\n return args", "def getArgumentParser():\n parser = argparse.ArgumentParser(description=\"Script for running optimization for the ZH dark photon SR\")\n parser.add_argument('-i',\n '--infile',\n dest='infile',\n help='Input CSV file',\n default = '/afs/cern.ch/user/e/ehofgard/public/data/all_data')\n parser.add_argument('-o',\n '--output',\n dest='outdir',\n help='Output directory for plots, selection lists, etc',\n default='outdir')\n \n return parser", "def get_parser():\n\n parser = ArgumentParser()\n\n req_argument = parser.add_argument_group('required arguments')\n\n parser.add_argument(\"-o\", \"--outdir\", type=str, default='result',\n help=\"Path for results\")\n parser.add_argument(\"-fname\", \"--file_name\", type=str, default=\"try1\",\n help=\"The name the output file should have within the output directory\")\n parser.add_argument(\"-freq\", \"--frequency\", type=str,\n help=\"File to read the haplotype frequencies from\")\n parser.add_argument(\"-over\", \"--overlap\", type=str,\n help=\"File to read the peptide vs alleles or peptide vs haplotype data\")\n parser.add_argument(\"-o_a\", \"--overlap_allele\", type=int, default=0,\n help=\"1 if the --overlap file passed in is peptide vs alleles and 0 if it is peptide vs haplotypes and has already been binarized\")\n # parser.add_argument(\"-n\", \"--ntarget\", type=int, default=5,\n # help=\"The ntarget for max n-times coverage\")\n parser.add_argument(\"-maxpep\", \"--max_number_of_pepts\", type=int, default=30,\n help=\"The maximum number of peptides allowed in a vaccine\")\n parser.add_argument(\"-c\", \"--cut\", type=int, default=3,\n help=\"The cut value for ommitting peptides that are too similar; a value of 0 should be provided if similar peptides are not to be excluded from a vaccine design.\")\n\n\n \n return parser", "def 
make_parser():\n parser_ = argparse.ArgumentParser(\n description=\"\"\"\n A tool to retrieve history from\n (almost) any browser on (almost) any platform\n\n██████╗ ██████╗ ██████╗ ██╗ ██╗███████╗███████╗██████╗ ██╗ ██╗██╗███████╗████████╗ ██████╗ ██████╗ ██╗ ██╗\n██╔══██╗██╔══██╗██╔═══██╗██║ ██║██╔════╝██╔════╝██╔══██╗ ██║ ██║██║██╔════╝╚══██╔══╝██╔═══██╗██╔══██╗╚██╗ ██╔╝\n██████╔╝██████╔╝██║ ██║██║ █╗ ██║███████╗█████╗ ██████╔╝█████╗███████║██║███████╗ ██║ ██║ ██║██████╔╝ ╚████╔╝\n██╔══██╗██╔══██╗██║ ██║██║███╗██║╚════██║██╔══╝ ██╔══██╗╚════╝██╔══██║██║╚════██║ ██║ ██║ ██║██╔══██╗ ╚██╔╝\n██████╔╝██║ ██║╚██████╔╝╚███╔███╔╝███████║███████╗██║ ██║ ██║ ██║██║███████║ ██║ ╚██████╔╝██║ ██║ ██║\n╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚══╝╚══╝ ╚══════╝╚══════╝╚═╝ ╚═╝ ╚═╝ ╚═╝╚═╝╚══════╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝ ╚═╝\n \"\"\", # noqa: E501\n epilog=\"\"\"\n Checkout the GitHub repo\n https://github.com/pesos/browser-history\n if you have any issues or want to help contribute\"\"\",\n formatter_class=RawDescriptionHelpFormatter,\n )\n\n parser_.add_argument(\n \"-t\",\n \"--type\",\n default=\"history\",\n help=f\"\"\"\n argument to decide whether to retrieve history or bookmarks.\n Should be one of {AVAILABLE_TYPES}.\n Default is history.\"\"\",\n )\n parser_.add_argument(\n \"-b\",\n \"--browser\",\n default=\"all\",\n help=f\"\"\"\n browser to retrieve history or bookmarks from. Should be one\n of all, default, {AVAILABLE_BROWSERS}.\n Default is all (gets history or bookmarks from all browsers).\n \"\"\",\n )\n\n parser_.add_argument(\n \"-f\",\n \"--format\",\n default=\"infer\",\n help=f\"\"\"\n Format to be used in output. Should be one of {AVAILABLE_FORMATS}.\n Default is infer (format is inferred from the output file's\n extension. If no output file (-o) is specified, it defaults to csv)\"\"\",\n )\n\n parser_.add_argument(\n \"-o\",\n \"--output\",\n default=None,\n help=\"\"\"\n File where history output or bookmark output is to be written.\n If not provided, standard output is used.\"\"\",\n )\n\n parser_.add_argument(\n \"-p\",\n \"--profile\",\n default=None,\n help=\"\"\"\n Specify the profile from which to fetch history or bookmarks. If\n not provided all profiles are fetched\n \"\"\",\n )\n\n parser_.add_argument(\n \"--show-profiles\",\n default=None,\n metavar=\"BROWSER\",\n help=f\"\"\"\n List all available profiles for a given browser where browser\n can be one of default, {AVAILABLE_BROWSERS}. The browser\n must always be provided.\n \"\"\",\n )\n\n parser_.add_argument(\n \"-v\", \"--version\", action=\"version\", version=\"%(prog)s \" + __version__\n )\n\n return parser_", "def init_argparser() -> ArgumentParser:\n parser = ArgumentParser()\n from_config = parser.add_argument_group('From config file', 'Provide full experiment setup via config file')\n from_config.add_argument('-c', '--config', help='Path to json file containing classification config.')\n from_cmd = parser.add_argument_group('From commandline', 'Specify experiment setup via commandline arguments')\n\n # Model options\n from_cmd.add_argument(\"--recoding_type\", type=str, default=None,\n choices=[\"mc_dropout\", \"surprisal\", \"ensemble\"],\n help=\"Recoding model type used for trainign. Choices include recoding based on MC Dropout,\"\n \"perplexity and anchored ensembles. 
If not specified, a vanilla model without recoding\"\n \"is used.\")\n from_cmd.add_argument(\"--step_type\", type=str, default=None, choices=[\"fixed\", \"mlp\", \"learned\"],\n help=\"Specifies the way the step size is determined when using a recoding model.\")\n from_cmd.add_argument(\"--step_size\", type=float,\n help=\"Step size for recoding in case the fixed step predictor is used.\")\n from_cmd.add_argument(\"--embedding_size\", type=int, help=\"Dimensionality of word embeddings.\")\n from_cmd.add_argument(\"--hidden_size\", type=int, help=\"Dimensionality of hidden states.\")\n from_cmd.add_argument(\"--num_layers\", type=int, help=\"Number of network layers.\")\n from_cmd.add_argument(\"--mc_dropout\", type=float, help=\"Dropout probability when estimating uncertainty.\")\n from_cmd.add_argument(\"--dropout\", type=float, help=\"Dropout probability for model in general.\")\n from_cmd.add_argument(\"--num_samples\", type=int, help=\"Number of samples used when estimating uncertainty.\")\n\n # Training options\n from_cmd.add_argument(\"--weight_decay\", type=float, help=\"Weight decay parameter when estimating uncertainty.\")\n from_cmd.add_argument(\"--prior_scale\", type=float,\n help=\"Prior length scale. A lower scale signifies a prior belief that the input data is \"\n \"distributed infrequently, a higher scale does the opposite.\")\n from_cmd.add_argument(\"--learning_rate\", type=float, help=\"Learning rate during training.\")\n from_cmd.add_argument(\"--batch_size\", type=int, help=\"Batch size during training.\")\n from_cmd.add_argument(\"--num_epochs\", type=int, help=\"Number of training epochs.\")\n from_cmd.add_argument(\"--clip\", type=float, help=\"Threshold for gradient clipping.\")\n\n # Corpus options\n from_cmd.add_argument(\"--corpus_dir\", type=str, help=\"Directory to corpus files.\")\n from_cmd.add_argument(\"--max_seq_len\", type=int, help=\"Maximum sentence length when reading in the corpora.\")\n\n # Screen output optins\n from_cmd.add_argument(\"--print_every\", type=int, help=\"Batch interval at which training info should be printed.\")\n from_cmd.add_argument(\"--eval_every\", type=int,\n help=\"Epoch interval at which the model should be evaluated on validation set.\")\n\n # Model saving and logging options\n from_cmd.add_argument(\"--model_name\", type=str, help=\"Model identifier.\")\n from_cmd.add_argument(\"--model_save_path\", type=str,\n help=\"Directory to which current best model should be saved to.\")\n from_cmd.add_argument(\"--device\", type=str, default=\"cpu\", help=\"Device used for training.\")\n from_cmd.add_argument(\"--log_dir\", type=str, help=\"Directory to write (tensorboard) logs to.\")\n\n return parser", "def build_argparser():\n parser = ArgumentParser()\n parser.add_argument(\"-m\", \"--model\", required=True, type=str,\n help=\"Path to an xml file with a trained model.\")\n parser.add_argument(\"-i\", \"--input\", required=True, type=str,\n help=\"Path to image or video file\")\n parser.add_argument(\"-l\", \"--cpu_extension\", required=False, type=str,\n default=None,\n help=\"MKLDNN (CPU)-targeted custom layers.\"\n \"Absolute path to a shared library with the\"\n \"kernels impl.\")\n parser.add_argument(\"-d\", \"--device\", type=str, default=\"CPU\",\n help=\"Specify the target device to infer on: \"\n \"CPU, GPU, FPGA or MYRIAD is acceptable. 
Sample \"\n \"will look for a suitable plugin for device \"\n \"specified (CPU by default)\")\n parser.add_argument(\"-pt\", \"--prob_threshold\", type=float, default=0.5,\n help=\"Probability threshold for detections filtering\"\n \"(0.5 by default)\")\n parser.add_argument('--log_level', type=str, choices=['debug', 'info', 'warning', 'error', 'critical'])\n return parser", "def _setup_parser():\n parser = argparse.ArgumentParser(add_help=True)\n parser.add_argument('--eval_model', type=str, default=None)\n parser.add_argument('--stack', type=int, default=1)\n parser.add_argument('--flare', action='store_true')\n parser.add_argument('--mixreg', action='store_true')\n\n env_group = parser.add_argument_group(\"Env Args\")\n env_group.add_argument('--env_name', type=str, default=ENV_NAME)\n env_group.add_argument('--num_envs', type=int, default=NUM_ENVS)\n env_group.add_argument('--num_levels', type=int, default=NUM_LEVELS)\n env_group.add_argument('--start_level', type=int, default=START_LEVEL)\n\n agent_group = parser.add_argument_group(\"Agent Args\")\n PPOAgent.add_to_argparse(agent_group)\n\n model_group = parser.add_argument_group(\"Model Args\")\n ImpalaPPO.add_to_argparse(model_group)\n\n return parser" ]
[ "0.72827846", "0.7190032", "0.7093924", "0.7032623", "0.70229095", "0.69860554", "0.6946884", "0.694275", "0.693303", "0.6930106", "0.69170386", "0.68352914", "0.6832233", "0.68094486", "0.68094486", "0.6783497", "0.6778075", "0.6772473", "0.675373", "0.6753382", "0.6745362", "0.67453533", "0.67277616", "0.6726878", "0.67200977", "0.67179614", "0.6714497", "0.6710915", "0.67108697", "0.6709461", "0.67072237", "0.6707107", "0.67058754", "0.66951764", "0.669295", "0.66789055", "0.6668219", "0.66521513", "0.6643682", "0.663348", "0.66223717", "0.66127026", "0.6612087", "0.65901315", "0.65893686", "0.65878814", "0.65717685", "0.6571755", "0.6571302", "0.65610194", "0.6560053", "0.6554782", "0.6549412", "0.6548388", "0.65478945", "0.6547051", "0.65459913", "0.6543047", "0.6536933", "0.65363085", "0.65329343", "0.6528922", "0.6525841", "0.65251374", "0.65245014", "0.6522853", "0.6522853", "0.6522853", "0.65182495", "0.65180236", "0.6515419", "0.65133804", "0.65131754", "0.6505336", "0.64989084", "0.6492662", "0.6491342", "0.64909226", "0.6487511", "0.648746", "0.6486044", "0.6484204", "0.64755386", "0.64747065", "0.6474691", "0.64695555", "0.6468781", "0.6468313", "0.6464099", "0.6463307", "0.64596814", "0.6459538", "0.6459139", "0.6458402", "0.64583963", "0.6454141", "0.64534485", "0.6447247", "0.6446502", "0.64459866" ]
0.788973
0
Max secondary depth based on modelshift secondary test from Jeff Coughlin
def modelshift_weaksec(koi):
    num = KOIDATA.ix[ku.koiname(koi), 'koi_tce_plnt_num']
    if np.isnan(num):
        num = 1
    kid = KOIDATA.ix[ku.koiname(koi), 'kepid']
    tce = '{:09.0f}-{:02.0f}'.format(kid,num)
    #return largest depth between DV detrending and alternate detrending
    try:
        r = ROBOVETDATA.ix[tce]
    except KeyError:
        raise NoWeakSecondaryError(koi)
    depth_dv = r['mod_depth_sec_dv'] * (1 + 3*r['mod_fred_dv'] / r['mod_sig_sec_dv'])
    depth_alt = r['mod_depth_sec_alt'] * (1 + 3*r['mod_fred_alt'] / r['mod_sig_sec_alt'])
    logging.debug(r[['mod_depth_sec_dv','mod_fred_dv','mod_sig_sec_dv']])
    logging.debug(r[['mod_depth_sec_alt','mod_fred_alt','mod_sig_sec_alt']])
    if np.isnan(depth_dv) and np.isnan(depth_alt):
        #return weaksec_vv2(koi)
        raise NoWeakSecondaryError(koi)
    elif np.isnan(depth_dv):
        return depth_alt
    elif np.isnan(depth_alt):
        return depth_dv
    else:
        return max(depth_dv, depth_alt)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_max_depth_val():\n data = SUNRGBDTrainDataset(True)\n return max([data[0][i][-1].flatten().item() for i in range(len(data))])", "def estimate_maxm_neutral_axis_depth(self):\r\n\t\txu_d = abs(self.concrete.max_compresive_strain)/\\\r\n\t\t\t(abs(self.concrete.max_compresive_strain) + self.steel.max_tensile_strain)\r\n\t\tself.max_positive_na = xu_d*self.positive_effective_depth\r\n\t\tself.max_negative_na = xu_d*self.negative_effective_depth", "def score_max_depths(graph, max_depths):\n ###TODO\n pass", "def get_max_dmag_from_depth(depth):\n return 2.5 * np.log10(depth)", "def test_random_forest_max_depth_parameter(params, X_train, X_test, y_train, y_test):", "def max_target(board, depth, alpha, beta):\n if terminal(board) or depth == DEPTH:\n return utility(board)\n\n best_val = -math.inf\n for action in actions(board):\n val = min_target(result(board, action), depth+1, alpha, beta)\n best_val = max(best_val, val)\n alpha = max(alpha, best_val)\n if beta <= alpha:\n break\n\n return best_val", "def max_well(self):\n maxVal = np.max(self.get_well_depth_image())\n return maxVal", "def est_maxlevel(dims,bandwidth):\n lev = math.floor((math.log(min(dims))/math.log(2)-2)/bandwidth)\n lev=int(lev)\n return lev", "def max_depth(self) -> int:\n return 0", "def max_value(self, state, max_alpha, max_beta, max_depth):\r\n if state.terminal_test():\r\n return state.utility(0)\r\n if max_depth <=0 :\r\n return self.score(state)\r\n\r\n v = float(\"-inf\")\r\n for a in state.actions():\r\n v = max(v, self.min_value(state.result(a), max_alpha, max_beta, max_depth - 1))\r\n if v >= max_beta:\r\n return v\r\n max_alpha = max(max_alpha, v)\r\n return v", "def max_depth_forest(self):\n return max(x.tree_.max_depth for x in self.result.estimators_)", "def max_depth(self) -> int:\n return pulumi.get(self, \"max_depth\")", "def max_depth(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_depth\")", "def find_max_score_location(grid, shape):", "def max_power_candidate_thermal_rule(_m, g, y, s, t):\r\n\r\n return m.p[g, y, s, t] - sum(m.x_c[g, j] for j in m.Y if j <= y) <= 0", "def leaf_prediction(self, node_id): #returns y_idx\n values = self.tree.value[node_id]\n return np.argmax(values)", "def get_max_passes(example_height: int) -> int:\n return (example_height - 5) // 4", "def max_power_out_candidate_storage_rule(_m, g, y, s, t):\r\n\r\n return m.p_out[g, y, s, t] - sum(m.x_c[g, j] for j in m.Y if j <= y) <= 0", "def dfs_maximizing(state) :\n #print state.describe_previous_move()\n global state_evals, path, _path, _score, level, _state;\n\n level+=1\n path.append(state)\n for stt in state.generate_next_states():\n score=0\n agenda.append((stt, level))\n \n if stt.is_game_over():\n state_evals+=1\n score=stt.get_endgame_score()\n if score>_score:\n _score=score\n _path = path[0:]\n _state = stt\n if not agenda:\n\n _path.append(_state)\n return [_path, _score, state_evals];\n else:\n new_state, level=agenda.pop()\n path=path[0:level]\n level-=1\n return dfs_maximizing(new_state)", "def maximumDistance(self):\n from ete2 import Tree\n t = Tree(name='LUCA_root')\n empty_forest = {'sp':t,'gns':t,'fam':t,'ord':t,'cls':t,'phy':t,'kng':t}\n return self.distanceToTree(empty_forest,update_inner_attributes=False)", "def _height1(self): #works but n^2 time\n return max(self.depth(p) for p in self.positions() if self.is_leaf(p))", "def get_max_A_depth(output_list):\n A_matrix_size_per_gen = [np.shape(gen[0]) for gen in output_list\n if gen is not None]\n\n return max([A_shape[-3] for A_shape 
in A_matrix_size_per_gen], default=0)", "def get_local_max_species_diffusivity(actx, discr, d_alpha):\n if len(d_alpha) == 0:\n return 0\n if not isinstance(d_alpha[0], DOFArray):\n return max(d_alpha)\n\n from functools import reduce\n return reduce(actx.np.maximum, d_alpha)", "def max_power_existing_thermal_rule(_m, g, y, s, t):\r\n\r\n return m.p[g, y, s, t] - (m.P_MAX[g] * (1 - m.F[g, y])) <= 0", "def _get_max_nodal_plane_number(sourceModel):\n num = 0\n numhd = 0\n cnt = 0\n numbins = 0\n for src in sourceModel.sources:\n\n if isinstance(src, AreaSource):\n num = len(src.nodal_plane_dist) if \\\n len(src.nodal_plane_dist) > num else num\n numhd = len(src.hypo_depth_dist) if \\\n len(src.hypo_depth_dist) > numhd else numhd\n\n if isinstance(src.mfd, IncrementalMFD):\n numbins = len(src.mfd.occur_rates) if \\\n len(src.mfd.occur_rates) > numhd else numhd\n\n cnt += 1\n print 'The model contains %d area sources' % (cnt)\n return num, numhd, numbins", "def test_check_for_max_rmsd():\n phil_groups = ncs_group_master_phil.fetch(\n iotbx.phil.parse(phil_str)).extract()\n pdb_inp = iotbx.pdb.input(source_info=None, lines=test_pdb_str_2)\n ncs_obj_phil = ncs.input(\n hierarchy=pdb_inp.construct_hierarchy(),\n ncs_phil_groups=phil_groups.ncs_group)\n nrgl = ncs_obj_phil.get_ncs_restraints_group_list()\n pdb_inp = iotbx.pdb.input(lines=test_pdb_str_2,source_info=None)\n ph = pdb_inp.construct_hierarchy()\n # passing test\n assert nrgl.check_for_max_rmsd(ph.atoms().extract_xyz() ,chain_max_rmsd=1)\n # make sure test fails when it suppose to\n nrgl[0].copies[1].t = matrix.col([100, -89.7668, 5.8996])\n assert not nrgl.check_for_max_rmsd(ph.atoms().extract_xyz(),chain_max_rmsd=1)", "def max_level(self):\n return self.__max", "def max_power_in_candidate_storage_rule(_m, g, y, s, t):\r\n\r\n return m.p_in[g, y, s, t] - sum(m.x_c[g, j] for j in m.Y if j <= y) <= 0", "def __get_max_depth(self, conf):\n return conf[self.conf_item.get_max_depth()]", "def _max_value(\r\n self,\r\n state: TwoPlayerGameState,\r\n depth: int,\r\n ) -> float:\r\n if state.end_of_game or depth == 0:\r\n minimax_value = self.heuristic.evaluate(state)\r\n\r\n else:\r\n minimax_value = -np.inf\r\n\r\n successors = self.generate_successors(state)\r\n for successor in successors:\r\n if self.verbose > 1:\r\n print('{}: {}'.format(state.board, minimax_value))\r\n\r\n successor_minimax_value = self._min_value(\r\n successor, depth - 1,\r\n )\r\n if (successor_minimax_value > minimax_value):\r\n minimax_value = successor_minimax_value\r\n\r\n if self.verbose > 1:\r\n print('{}: {}'.format(state.board, minimax_value))\r\n\r\n return minimax_value", "def get_max_depth(self):\n return self.MAX_DEPTH", "def get_max_depth(clf):\n tree =clf.tree_\n def get_node_depths_(current_node, current_depth, l, r, depths):\n depths += [current_depth]\n if l[current_node] != -1 and r[current_node] != -1:\n get_node_depths_(l[current_node], current_depth + 1, l, r, depths)\n get_node_depths_(r[current_node], current_depth + 1, l, r, depths)\n\n depths = []\n get_node_depths_(0, 0, tree.children_left, tree.children_right, depths) \n return max(depths)", "def _get_new_max(self, insert=True):\n right = 1\n left = 1\n if self._root:\n if self._root._rkid:\n right = self._new_depth(self._root._rkid, 2)\n if self._root._lkid:\n left = self._new_depth(self._root._lkid, 2)\n self._rbal = right\n self._lbal = left\n if insert:\n if right > left:\n if right > self._max_depth:\n self._max_depth = right\n elif left > self._max_depth:\n self._max_depth = left\n 
else:\n if right > left:\n if right < self._max_depth:\n self._max_depth = right\n elif left < self._max_depth:\n self._max_depth = left", "def _max_depth(self):\n max_depth = 0\n for node, data in self.traverse():\n max_depth = max(max_depth, data['level'])\n return max_depth", "def find_max(subimage):\r\n\tmax_val_subimage = np.nanmax(subimage)\r\n\treturn max_val_subimage", "def max_value(tree):\n max_utility = float(\"-inf\")\n \n if (is_terminal(tree)):\n return tree\n else:\n #options = []\n for node in tree:\n #options.append(max_value(node))\n max_utility = max(max_utility, min_value(node))\n return max_utility", "def max_power_candidate_wind_rule(_m, g, y, s, t):\r\n\r\n return m.p[g, y, s, t] - (m.Q_W[g, y, s, t] * sum(m.x_c[g, j] for j in m.Y if j <= y)) <= 0", "def max_diaphragmatic_level(levels):\n return [max(x) for x in levels]", "def _find_max(self, root):\n while root.right:\n root = root.right\n return root", "def _active_depth(self):\n for n_left, n_right in self.graph.dfs():\n if self.node(n_right)['pad'] == 0:\n return self.node(n_right)['level']\n return 0", "def DEFAULT_MAX_DEPTH_DIFF(self): # real signature unknown; restored from __doc__\n pass", "def depth(x):\n return max(int(x * depth_multiplier), 8)", "def max_value(self, game, depth):\n if self.time_left() < self.TIMER_THRESHOLD: # Timeout check\n raise SearchTimeout()\n\n if game.is_loser(self) or game.is_winner(self) or depth == 0: # Terminal test, checks base cases\n return self.score(game,self) # returns the score, UTILITY of the current state\n legal_moves = game.get_legal_moves() # obtain all legal moves for game, ACTIONs that can be taken\n best_score = -math.inf # abstraction assignment of neg. infinity(lowest possible value for MAX score)\n for m in legal_moves: # iterate through all available actions\n new_state = game.forecast_move(m) # for each available move, forecast the resulting state from that ACTION\n # RESULT of ACTION\n score = self.max_value(new_state, depth - 1) # recursively uses the new state\n best_score = max(best_score,score) # calculates the minimizing score between the states\n return best_score # propagates minimizing score for given state", "def get_depth_out(self):\n self.depth_out = self.bottle_params[-1][0][-1]\n self.depth_middle = self.bottle_params[-2][0][-1]\n self.stride = self.bottle_params[-2][1]", "def get_max_score(location_list, grid, shape):", "def max_level(data: np.ndarray) -> int:\n shape = data.shape[1:] # exclude channel dimension\n return min(shape).bit_length() - 1", "def max_power_candidate_solar_rule(_m, g, y, s, t):\r\n\r\n return m.p[g, y, s, t] - (m.Q_S[g, y, s, t] * sum(m.x_c[g, j] for j in m.Y if j <= y)) <= 0", "def get_max_depth_node(nodes):\n curr = nodes[0]\n for i in range(0, len(nodes)):\n if nodes[i].depth > curr.depth:\n curr = nodes[i]\n return curr", "def evaluate_depth(opt):\r\n MIN_DEPTH = 1e-3\r\n MAX_DEPTH = 80\r\n\r\n if not opt.use_ext_res:\r\n # ----------\r\n # Prepare models\r\n # ----------\r\n assert os.path.isdir(opt.weights_dir), 'folder: %s doesn\\'t exists' % opt.weights_dir\r\n print('-> Loading weights from {}'.format(opt.weights_dir))\r\n # filenames = readlines(os.path.join(splits_dir, opt.eval_split, \"test_files.txt\"))\r\n\r\n models = {'depth_enc': ResNet18_new([2, 2, 2, 2]),\r\n 'depth_dec': DepthDecoder_full()}\r\n build_models(models)\r\n for m_name in depth_model_names:\r\n m_path = os.path.join(opt.weights_dir, m_name + '.h5')\r\n models[m_name].load_weights(m_path)\r\n\r\n # ----------\r\n # Get dataset\r\n # 
----------\r\n print('-> Preparing dataset: KITTI_Raw...')\r\n split_folder = os.path.join('splits', opt.eval_split)\r\n split_name = 'test_files.txt'.format(opt.eval_split)\r\n path_tmp = os.path.join(split_folder, split_name)\r\n assert os.path.isfile(path_tmp), '%s is not valid path to split files' % path_tmp\r\n\r\n opt.frame_idx = [0]\r\n num_scales = 1\r\n batch_size = 16\r\n dataset = KITTIRaw(split_folder, split_name, data_path=opt.data_path)\r\n data_loader = DataLoader(dataset, num_epoch=1,\r\n batch_size=batch_size, frame_idx=opt.frame_idx)\r\n eval_iter = data_loader.build_eval_dataset()\r\n batch_processor = DataProcessor(frame_idx=opt.frame_idx, num_scales=num_scales,\r\n intrinsics=dataset.K)\r\n\r\n # ----------\r\n # Generate predicted disparity map\r\n # ----------\r\n print('-> Generate predicted disparity map...')\r\n pred_disps = []\r\n output = {}\r\n for batch in eval_iter:\r\n # batch = eval_iter.get_next()\r\n input_imgs, input_Ks = batch_processor.prepare_batch_val(batch)\r\n input_color = input_imgs[('color', 0, 0)]\r\n disp_raw = models['depth_dec'](models['depth_enc'](input_color))\r\n output[('disp', 0)] = disp_raw['output_0']\r\n pred_disp, _ = disp_to_depth(output[('disp', 0)], opt.min_depth, opt.max_depth)\r\n pred_disp = pred_disp[..., 0].numpy() # squeeze the last dim\r\n pred_disps.append(pred_disp)\r\n\r\n pred_disps = np.concatenate(pred_disps)\r\n\r\n # todo: use imported disp to eval\r\n else:\r\n print(\"-> Loading predictions from {}\".format(opt.ext_res_path))\r\n pred_disps = np.load(opt.ext_res_path)\r\n\r\n if opt.eval_eigen_to_benchmark:\r\n eigen_to_benchmark_ids = np.load(\r\n os.path.join(splits_dir, \"benchmark\", \"eigen_to_benchmark_ids.npy\"))\r\n\r\n pred_disps = pred_disps[eigen_to_benchmark_ids]\r\n\r\n if opt.save_pred_disps:\r\n output_dir = os.path.join(root_dir, 'outputs', 'disps')\r\n if not os.path.isdir(output_dir):\r\n os.makedirs(output_dir)\r\n output_path = os.path.join(output_dir, 'disps_{}_split.npy'.format(opt.eval_split))\r\n print('-> Saving predicted disparities to ', output_path)\r\n np.save(output_path, pred_disps)\r\n\r\n # Just need generate predictions, but not to evaluate them\r\n if opt.no_eval:\r\n print(\"-> Evaluation disabled. Done.\")\r\n quit()\r\n\r\n # ----------\r\n # Get ground truth and start evaluation\r\n # ----------\r\n print(\"-> Loading depth ground truth...\")\r\n gt_path = os.path.join(splits_dir, opt.eval_split, 'gt_depths.npz')\r\n gt_depths = np.load(gt_path, fix_imports=True, encoding='latin1', allow_pickle=True)[\"data\"]\r\n\r\n print(\"-> Evaluating...\")\r\n errors = []\r\n ratios = []\r\n for i in range(len(pred_disps)):\r\n gt_depth = gt_depths[i]\r\n gt_height, gt_width = gt_depth.shape[:2]\r\n\r\n pred_disp = pred_disps[i]\r\n pred_disp = cv.resize(pred_disp, (gt_width, gt_height))\r\n pred_depth = 1. 
/ pred_disp\r\n\r\n if opt.eval_split == \"eigen\":\r\n mask = np.logical_and(gt_depth > MIN_DEPTH, gt_depth < MAX_DEPTH)\r\n\r\n crop = np.array([0.40810811 * gt_height, 0.99189189 * gt_height,\r\n 0.03594771 * gt_width, 0.96405229 * gt_width]).astype(np.int32)\r\n crop_mask = np.zeros(mask.shape)\r\n crop_mask[crop[0]:crop[1], crop[2]:crop[3]] = 1\r\n mask = np.logical_and(mask, crop_mask)\r\n else:\r\n mask = gt_depth > 0\r\n\r\n pred_depth = pred_depth[mask] # tf.boolean_mask\r\n gt_depth = gt_depth[mask]\r\n\r\n pred_depth *= opt.pred_depth_scale_factor\r\n if opt.use_median_scaling:\r\n ratio = np.median(gt_depth) / np.median(pred_depth)\r\n ratios.append(ratio)\r\n pred_depth *= ratio\r\n\r\n pred_depth[pred_depth < MIN_DEPTH] = MIN_DEPTH\r\n pred_depth[pred_depth > MAX_DEPTH] = MAX_DEPTH\r\n\r\n errors.append(compute_errors(gt_depth, pred_depth))\r\n\r\n if opt.use_median_scaling:\r\n ratios = np.array(ratios)\r\n med = np.median(ratios)\r\n print('\\tScaling ratios | median: {:0.3f} | std: {:0.3f}'.format(med, np.std(ratios/med)))\r\n\r\n mean_errors = np.array(errors).mean(0)\r\n print(\"\\n \" + (\"{:>8} | \" * 7).format(\"abs_rel\", \"sq_rel\", \"rmse\", \"rmse_log\", \"a1\", \"a2\", \"a3\"))\r\n print((\"&{: 8.3f} \" * 7).format(*mean_errors.tolist()) + \"\\\\\\\\\")\r\n print(\"\\n-> Done!\")", "def depth(self):\n return self._max_depth", "def max_power_hydro_rule(_m, g, y, s, t):\r\n\r\n return m.p[g, y, s, t] - (m.P_H[g, y, s, t] * (1 - m.F[g, y])) <= 0", "def calc_max_Harmony(self):\n\n state = torch.linalg.pinv(self.W).matmul(-self.B - self.inpS)\n stateC = self.toConceptual(state)\n harmony = self.calc_harmony(state=state)\n return harmony, state, stateC", "def findApproxDepth(train, valid, mD=0, mS=0):\n print(\n \"Building a random set of small trees to geuss the max depth and min set size values\"\n )\n res = []\n tree = DecisionTree(train.randSubSet(120, True))\n r = 10\n s = 3\n if mD != 0:\n s = mD - 1\n r = 1\n for i in range(\n s,\n r + s,\n ):\n depth = i + 1 # depth = randint(2,(len(train[0])-1)*3)\n a = 2\n b = 15\n if mS != 0:\n a = mS\n b = mS + 1\n for min_size in range(a, b, 2):\n # min_size = randint(2,(len(train[0])-1)*2)\n tree.buildTree(depth, min_size, True)\n acc = testTreeF(tree, valid)\n res.append([depth, min_size, acc])\n print(\"%.2f\" % (100 * (i - s + 1) / r), \"percent done\")\n best = max(res, key=lambda r: r[-1])\n # res.sort(key=lambda r: r[-1])\n # for r in res:\n # print(r)\n print(\"found a depth of\", best[0], \"and min size of\", best[1])\n return best", "def _get_max_t(self):\n \"\"\"\n if hasattr(self,'k_of_t'):\n return max([ \n self.s_of_t[-1][0],\n self.i_of_t[-1][0],\n self.r_of_t[-1][0],\n self.k_of_t[-1][0],\n ])\n else:\n return max([ \n self.s_of_t[-1][0],\n self.i_of_t[-1][0],\n self.r_of_t[-1][0],\n ])\n \"\"\"\n return self.t_max", "def max_diffs(state):\n # your code here\n return best_action(state, pig_actions, Q_pig, win_diff)", "def findRFBestDepth():\n resultList = []\n BestScore = 0\n # iterate through different max_depths from 1 to 19\n for max_depth in range(1,20):\n rforest = ensemble.RandomForestClassifier(max_depth=max_depth, n_estimators=100)\n trainng_score = []\n testing_score = []\n # run 10 different cross-validation\n for index in range(10):\n # split into cross-validation sets.\n cv_data_train, cv_data_test, cv_target_train, cv_target_test = \\\n cross_validation.train_test_split(X_train, y_train, test_size=0.1)\n\n # fit the model using the cross-validation data\n # and tune parameter, such as max_depth 
here\n rforest = rforest.fit(cv_data_train, cv_target_train)\n trainng_score += [rforest.score(cv_data_train,cv_target_train)]\n testing_score += [rforest.score(cv_data_test,cv_target_test)]\n\n # Compute the average score for both traning and testing data\n trainng_avgScore = 1.0 * sum(trainng_score)/len(trainng_score)\n testing_avgScore = 1.0 * sum(testing_score)/len(testing_score)\n\n # find the best score\n if testing_avgScore > BestScore:\n BestScore = testing_avgScore\n best_depth = max_depth\n resultList += [[best_depth, trainng_avgScore, testing_avgScore]]\n print ('The best average score and the corresponding max_depth is: ')\n return BestScore, best_depth", "def max_path_cost(self, path, attr): \n return max([self.G[path[i]][path[i+1]][attr] for i in range(len(path)-1)])", "def miniMax(self,state,depth=0):\n \n #print(\"NextState (depth \"+str(depth)+\"):\")\n #print(\"Action: \"+state.get_action())\n \n if state in self.__explored:\n return self.__explored[state.get_hashable_state()]\n \n if state.is_end_state() or depth >= (self.__max_depth - 1):\n self.__explored[state.get_hashable_state()] = state.get_utility_value()\n return state.get_utility_value() #Return terminal state's utility value\n \n is_max_turn = state.get_max_turn()\n childList = state.get_successors()\n \n if is_max_turn:\n utility = float(\"-inf\")\n for c in childList:\n utility = max(utility,self.miniMax(c, depth+1))\n self.__explored[state.get_hashable_state()] = utility\n return utility\n else:\n utility = float(\"inf\")\n for c in childList:\n utility = min(utility,self.miniMax(c, depth+1))\n self.__explored[state.get_hashable_state()] = utility\n return utility", "def approximate_betweenness(graph, max_depth):\n ###TODO\n pass", "def test_guess_even_sampling_depth(self):\r\n data = [6, 1, 2, 9, 4, 1, 2]\r\n expected = 1 # MAD = 2.25; med - MAD = -0.25\r\n self.assertEqual(guess_even_sampling_depth(data), expected)", "def min_target(board, depth, alpha, beta):\n if terminal(board) or depth == DEPTH:\n return utility(board)\n\n best_val = math.inf\n for action in actions(board):\n val = max_target(result(board, action), depth+1, alpha, beta)\n best_val = min(val, best_val)\n beta = min(beta, best_val)\n if beta <= alpha:\n break\n\n return best_val", "def max_power_out_existing_storage_rule(_m, g, y, s, t):\r\n\r\n return m.p_out[g, y, s, t] - (m.P_OUT_MAX[g] * (1 - m.F[g, y])) <= 0", "def max_depth(self) -> int:\n if self.child_actions:\n return max(child_action.max_depth\n for child_action in self.child_actions)\n else:\n return self.depth", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", "def finetune_depth():\n start_depth = 3\n tol = 10E-4\n best_depth = start_depth\n acc = [-1]\n for i in tqdm(range(20),desc='Progress(max_depth)',ncols=70,smoothing=0.5):\n XGBCla = get_XGBmodel(depth=i+start_depth)\n XGBCla.fit(X_train, y_train)\n pred = XGBCla.predict(X_test)\n acc.append(accuracy_score(y_test, pred))\n if (abs(acc[i]-acc[i+1])<tol):\n break\n if (acc[i]<acc[i+1]):\n best_depth = start_depth + i\n print(\"Accuracy: %.4f\" % acc[-1])\n print(\"Best depth: %d\" % best_depth)", "def recursive_max_train(seq, remaining_tiles):\n # find what I'm playing on. 
This requires me to order the tiles correctly\n live_end = seq[-1][1]\n\n # get list of tile that can be played\n playable_tiles = []\n viable_legs = []\n\n for tile in remaining_tiles:\n if live_end in tile:\n playable_tiles.append(tile)\n\n # if there are no playable tiles, return incoming sequence\n if not playable_tiles:\n return seq\n\n # for each playable tile, find the longest/highest value train\n for tile in playable_tiles:\n # find remaining hand\n _my_hand = remaining_tiles.copy()\n _my_hand.remove(tile)\n\n # if tile is ordered backwards, switch it so I get the live end right\n _my_tile = tile\n\n if tile[0] == live_end:\n pass\n elif tile[1] == live_end:\n _my_tile.reverse()\n else:\n assert \"Shouldn't get here\"\n\n # RECURSION HERE. BE CAREFUL OF ORDER.\n viable_legs.append(recursive_max_train(seq + [_my_tile], _my_hand))\n\n # find length of longest viable leg\n max_leg_len = max([len(leg) for leg in viable_legs])\n\n # set max_leg_value so\n max_leg_val = 0\n\n for leg in viable_legs:\n if len(leg) == max_leg_len:\n # some multi-layer list comprehension voodoo\n leg_val = sum([pip for tile in leg for pip in tile])\n\n if leg_val > max_leg_val:\n # if this is more valuable\n max_leg_val = leg_val\n max_leg = leg\n\n return max_leg", "def best_action(self):\n child_score = self.child_Q() + self.mcts.c_puct * self.child_U()\n masked_child_score = child_score\n return np.argmax(masked_child_score)", "def max_power_existing_wind_rule(_m, g, y, s, t):\r\n\r\n return m.p[g, y, s, t] - (m.Q_W[g, y, s, t] * m.P_MAX[g] * (1 - m.F[g, y])) <= 0", "def _get_max_estimated_bandit(self)->Bandit:\n # print(\"mus - \", self.mu)\n # print(\"actions - \", np.argmax(self.mu))\n unique, counts = np.unique(self.mu, return_counts=True)\n lens = counts[np.argmax(unique)] \n if lens>1: # if two actions have same argmax\n # then return arbitrarily from those max ones\n maxs = list(np.array(self.bandits)[self.mu==unique[np.argmax(unique)]])\n return np.random.choice(maxs)\n # otherwise return the max one\n return self.bandits[np.argmax(self.mu)]", "def argmax(X):\n\tN,K,_ = X.shape\n\tg0 = X[0,0]\n\tg = X[1:]\n\n\tB = ones((N,K), dtype=int32) * -1\n\t# compute max-marginals and backtrace matrix\n\tV = g0\n\tfor t in xrange(1,N):\n\t\tU = empty(K)\n\t\tfor y in xrange(K):\n\t\t\tw = V + g[t-1,:,y]\n\t\t\tB[t,y] = b = w.argmax()\n\t\t\tU[y] = w[b]\n\t\tV = U\n\t# extract the best path by brack-tracking\n\ty = V.argmax()\n\ttrace = []\n\tfor t in reversed(xrange(N)):\n\t\ttrace.append(y)\n\t\ty = B[t, y]\n\ttrace.reverse()\n\treturn trace", "def __max_value(self, game, depth):\n self.__check_time()\n val = float('-inf')\n\n # check if out of moves or at depth limit\n if self.__is_terminal(game, depth):\n val = self.score(game, self)\n else:\n for move in game.get_legal_moves():\n # depth decremented by 1 on each call\n val = max(val, self.__min_value(game.forecast_move(move), depth - 1))\n\n return val", "def test_svd_sharpness_with_model(self):\n \t\t\n\t\tesd_before = self.watcher.get_ESD(model=self.model, layer=self.fc2_layer) \n\t\t\n\t\tself.watcher.SVDSharpness(layers=[self.fc2_layer])\n\t\tesd_after = self.watcher.get_ESD(layer=self.fc2_layer) \n\t\t\n\t\tprint(\"max esd before {}\".format(np.max(esd_before)))\n\t\tprint(\"max esd after {}\".format(np.max(esd_after)))\n\n\t\tself.assertGreater(np.max(esd_before),np.max(esd_after))", "def _height1(self): # works, but O(n^2) worst-case time\n return max(self.depth(p) for p in self.positions() if self.is_leaf(p))", "def 
max_power_in_existing_storage_rule(_m, g, y, s, t):\r\n\r\n return m.p_in[g, y, s, t] - (m.P_IN_MAX[g] * (1 - m.F[g, y])) <= 0", "def dirichlet_max(sampled_probas):\n\talphas = dirichlet_fit(sampled_probas)\n\treturn alphas.max(1)", "def find_optimal_depth(x_train, x_test, y_train, y_test):\n # declare variables\n max_depths = np.linspace(1, 15, 15, endpoint=True)\n train_results = []\n test_results = []\n # iterate over the different depths\n for depth in max_depths:\n trees = DecisionTreeClassifier(criterion='entropy', max_depth=depth)\n trees.fit(x_train, y_train)\n\n # Add auc score to train list\n train_pred = trees.predict(x_train)\n fpr, tpr, thresholds = roc_curve(y_train, train_pred)\n roc_auc = auc(fpr, tpr)\n train_results.append(roc_auc)\n\n # Add auc score to test list\n test_pred = trees.predict(x_test)\n fpr, tpr, thresholds = roc_curve(y_test, test_pred)\n roc_auc = auc(fpr, tpr)\n test_results.append(roc_auc)\n\n plt.figure(figsize=(8, 5))\n plt.plot(max_depths, train_results, 'b', label='Train AUC')\n plt.plot(max_depths, test_results, 'r', label='Test AUC')\n plt.ylabel('AUC score', fontsize=16)\n plt.xlabel('Tree depth', fontsize=16)\n plt.legend()\n plt.show()\n return", "def max_power_existing_solar_rule(_m, g, y, s, t):\r\n\r\n return m.p[g, y, s, t] - (m.Q_S[g, y, s, t] * m.P_MAX[g] * (1 - m.F[g, y])) <= 0", "def max():\n valid=result_alpha.F>0\n src_data.F[valid]=np.maximum( src_data.F[valid],result_data.F[valid] )", "def _update_max(self):\n tmp = self\n while tmp.right is not None:\n tmp = tmp.right\n return tmp.parent.key", "def depthwise_conv2d_model():\n\n inputs = tf.keras.Input(shape=(10, 10, 3,))\n x = tf.keras.layers.Conv2D(16, (1, 1))(inputs)\n x = tf.keras.layers.SeparableConv2D(10, (2, 2))(x)\n x = tf.keras.layers.DepthwiseConv2D(3, (1, 1))(x)\n x = tf.keras.layers.Conv2D(8, (1, 1))(x)\n x = tf.keras.layers.Flatten()(x)\n outputs = tf.keras.layers.Dense(10, activation=tf.nn.softmax, name=\"depthwise_conv2d_model\")(x)\n return outputs", "def scale_depth_disp(self, pred): # TODO\n disp = 1. / pred\n min_disp = 1. / self.hparams.max_depth\n max_disp = 1. / self.hparams.min_depth\n scaled_disp = min_disp + (max_disp - min_disp) * ((disp - np.min(disp)) / (np.max(disp) - np.min(disp)))\n scaled_depth = 1. 
/ scaled_disp\n return scaled_disp, scaled_depth", "def max_value(board, max_util, min_util, depth):\r\n \r\n global nodes_generated \r\n global min_prune\r\n global max_prune\r\n global max_depth\r\n \r\n nodes_generated += 1\r\n max_depth = max(max_depth,depth)\r\n \r\n if cutoff_search(board, depth):\r\n return evaluation(board)\r\n v = -1000\r\n moves = legal_moves(board,1)\r\n for move in moves:\r\n temp_board = camelot_board.Camelot(list(board.white),list(board.black))\r\n state = action(temp_board, move, 1)\r\n v = max(v, min_value(state, max_util, min_util, depth + 1))\r\n if v >= min_util:\r\n max_prune += 1\r\n return v\r\n max_util = max(max_util, v)\r\n return v", "def max_powerflow_rule(_m, l, y, s, t):\r\n\r\n return m.p_L[l, y, s, t] - m.POWERFLOW_MAX[l] <= 0", "def get_max(self):\n if not self:\n return None\n return self.right.get_max() if self.right else self.value #Ternarary Operator", "def get_max_depth(clade):\n depths = clade.depths()\n if not max(depths.values()):\n depths = clade.depths(unit_branch_lengths=True)\n return max(depths.values()) * tree_depth / actual_tree_depth", "def test_find_highest_value_node_last(self):\n nn = NeuralNet(0, 0, '', '', blank=True)\n nn.create_net(2, 2, 2, 2)\n nn.eta = 0.1\n\n # Override weights to static value for reproducibility\n for node in nn.layers[2].nodes:\n node.weights = [0.6, 0.6]\n\n nn.layers[3].nodes[0].weights = [0.0, 0.0]\n nn.layers[3].nodes[1].weights = [1.0, 1.0]\n\n val = nn.assign_output([2, 3], test=True)\n self.assertEqual(val, '01')", "def get_max(self):\n\t\tif self.right:\n\t\t\treturn self.right.get_max()\n\t\treturn self.value", "def _max_value(\r\n self,\r\n state: TwoPlayerGameState,\r\n alpha: float,\r\n beta: float,\r\n depth: int,\r\n ) -> float:\r\n if state.end_of_game or depth == 0:\r\n phi = self.heuristic.evaluate(state)\r\n else:\r\n phi = -np.inf\r\n\r\n successors = self.generate_successors(state)\r\n for successor in successors:\r\n if self.verbose > 1:\r\n print('{}: {}'.format(state.board, phi))\r\n\r\n successor_beta = self._min_value(\r\n successor, alpha, beta, depth - 1\r\n )\r\n\r\n # Maximizing the min value\r\n if (successor_beta > phi):\r\n phi = successor_beta\r\n\r\n # Pruning\r\n if phi >= beta:\r\n return phi\r\n\r\n alpha = max(alpha, phi)\r\n\r\n if self.verbose > 1:\r\n print('{}: {}'.format(state.board, beta))\r\n\r\n return phi", "def rough_l_max(L):\r\n # TODO: Check if L is sparse or not, and handle the situation accordingly\r\n\r\n l_max = np.linalg.eigvalsh(L.todense()).max()\r\n\r\n\r\n l_max_ub = 1.01 * l_max\r\n return l_max_ub", "def max_decode(M):\r\n return scipy.array([ f.val.argmax() for f in M])", "def max_value(self, game, depth, alpha, beta):\n if self.time_left() < self.TIMER_THRESHOLD: # Timeout check\n raise SearchTimeout()\n\n if game.is_loser(self) or game.is_winner(self) or depth == 0: # Terminal test, checks base cases\n return self.score(game,self) # returns the score, UTILITY of the current state\n\n legal_moves = game.get_legal_moves() # obtain all the available moves on the board\n best_score = -math.inf # abstraction assignment of neg. 
infinity\n\n for m in legal_moves: # iterate through available moves - ACTIONS available to the state\n new_state = game.forecast_move(m)\n # for each move - ACTION, create the outcome of that move - RESULT of each ACTION resulting in a new state\n score = self.min_value(new_state, depth - 1, alpha, beta) # recursive call to min - using new state, alpha and beta\n best_score = max(best_score, score) # calculate max between best_score and score\n if best_score >= beta: # check if best score is greater than or equal to beta\n return best_score # return best score\n alpha = max(alpha, best_score) # calculate max between alpha and best_score\n return best_score # propagate max and return its value", "def disp_to_depth(disp, min_depth, max_depth):\n min_disp = 1 / max_depth\n max_disp = 1 / min_depth\n scaled_disp = min_disp + (max_disp - min_disp) * disp\n depth = 1 / scaled_disp\n return scaled_disp, depth", "def main_trees(df, prev_id=None):\n if not df.scale.is_monotonic_decreasing:\n raise RuntimeError(\"`df.scale` is not descending.\")\n\n a_uniq = df.scale.unique()\n mmp = np.zeros_like(a_uniq, dtype=int)\n if prev_id is None:\n prev_id = df[df.scale == a_uniq[0]].id.values[0]\n for i, a in enumerate(a_uniq):\n if i == 0 and a == 1:\n mmp[i] = df[df.id == prev_id].index[0]\n continue\n msk = (df.scale == a) & (df.desc_id == prev_id)\n if msk.sum() == 0:\n # We have reached the end of the branch.\n break\n mmp[i] = df[msk].mvir.idxmax()\n prev_id = df.loc[mmp[i]].id\n return mmp[mmp > 0]", "def max_level(board):\n acc_board = accum_board(board)\n for row in acc_board:\n row.append(0)\n acc_board.append([0]*len(acc_board[0]))\n m, n = len(board), len(board[0])\n max_level_sum = float('-inf')\n top_left = None\n for i in range(m):\n for j in range(n):\n for k in range(min(m-i, n-j)):\n level = (acc_board[i+k][j+k] +\n acc_board[i-1][j-1] -\n acc_board[i-1][j+k] -\n acc_board[i+k][j-1])\n if level > max_level_sum:\n max_level_sum = level\n top_left = (j+1, i+1, k+1)\n return top_left", "def max_cardinality():\r\n #create a list containing the number of each vertex involvement.\r\n array = []\r\n for i in adj:\r\n array += [i[0],i[1]]\r\n\r\n #compute the degree by counting the involment\r\n degree = Counter(array).most_common()\r\n\r\n #retrieve the degree only\r\n degree_ = [ i[1] for i in degree]\r\n\r\n degree_ = np.array(degree_)\r\n \r\n max_m = None\r\n \r\n #check if m is valid\r\n for i in range(degree[0][1]+2)[2:]:\r\n \r\n #valid if there are at least m vertex with degree equals to at least m-1 \r\n if i < len(np.where(degree_>=i-1)[0]):\r\n max_m = i\r\n else:\r\n break\r\n max_m += 1\r\n print(f'maximum possible clique cardinality :{max_m}')\r\n return max_m" ]
[ "0.6801275", "0.6496621", "0.6444544", "0.62129825", "0.6179012", "0.60663015", "0.59735364", "0.5784729", "0.5781161", "0.5756161", "0.5747669", "0.573456", "0.5719743", "0.56770504", "0.5676199", "0.56210965", "0.5609796", "0.5604197", "0.55953497", "0.55722934", "0.5571747", "0.55668515", "0.55534834", "0.5546595", "0.5534494", "0.5525665", "0.5520743", "0.55197525", "0.55168694", "0.5513239", "0.5500589", "0.5500505", "0.54896545", "0.54780394", "0.54701525", "0.5465997", "0.5465942", "0.54614025", "0.5441153", "0.54319483", "0.54300034", "0.5415576", "0.5406384", "0.5398526", "0.53971803", "0.5386076", "0.5384197", "0.53771806", "0.53637356", "0.5363321", "0.5360428", "0.5351373", "0.5347925", "0.5345526", "0.53293675", "0.5322102", "0.5293609", "0.5293052", "0.52742606", "0.52693224", "0.526358", "0.5262767", "0.52603", "0.525303", "0.525303", "0.525303", "0.525303", "0.525303", "0.525303", "0.52480626", "0.5230893", "0.5226234", "0.52232426", "0.5221533", "0.5220484", "0.5217182", "0.52085674", "0.5206076", "0.5200867", "0.5188535", "0.5186454", "0.5182842", "0.5178483", "0.5169785", "0.51695997", "0.5169423", "0.5169097", "0.51674974", "0.5163505", "0.5162807", "0.5162741", "0.5160866", "0.5160483", "0.5158898", "0.5158175", "0.5154985", "0.5145977", "0.5145136", "0.5142976", "0.51394415" ]
0.59691375
7
Applies default secthresh & exclusion radius constraints
def apply_default_constraints(self):
    try:
        self.apply_secthresh(pipeline_weaksec(self.koi))
    except NoWeakSecondaryError:
        logging.warning('No secondary eclipse threshold set for {}'.format(self.koi))
    self.set_maxrad(default_r_exclusion(self.koi))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def constraints(self):\n ...", "def _discretize(self, constraints_object):\n pass", "def objects_radius(self, centre, radius):", "def apply_constraint(self):\n\t\tself.angle = self.constraint(self.angle) % 360", "def cutout(self, centre, radius):", "def constraints(self, x):\n pass", "def _on_configure(self, event):\n self.radius = (min(event.width, event.height) - 2 * self.circ_pad) / 2", "def get_receptive_field_radius(self):\n raise NotImplementedError()", "def set_auto_throats_radius(self):\n\n for n1, n2 in self.graph.edges:\n self.graph[n1][n2]['radius'] = self._compute_auto_throat_radius(\n n1, n2)", "def __init__(self, radius=1, thickness=1, inner_radius=0):\n\n super().__init__()\n self.radius = radius\n self.inner_radius = inner_radius\n self.thickness = thickness", "def get_radius(self):", "def contractor(self, *args, **kwargs):\n vertices = copy.deepcopy(args[0])\n nrange = len(vertices[0])\n xpts = []\n ypts = []\n for i in range(nrange):\n xpts.append(vertices[0][i].value)\n ypts.append(vertices[1][i].value)\n constraint = copy.deepcopy(args[1])\n \n \n \n \n qxdot,qxddot,qydot,qyddot = self.update_allq(xpts,ypts)\n \n ## the all important computation split (need to abstract this kind of thing)\n ##lhs = (np.sqrt(qxdot*qxdot + qydot*qydot)**3.) *constraint\n lhs = ( ( np.sqrt(qxdot**2 + qydot**2) )**3 )*constraint\n \n # check2 = qxdot*qyddot\n # if check2.width() < 1.e-2:\n # check2.min.value = check2.real.value\n # check2.max.value = check2.real.value\n # t1 = (lhs - check2)/qydot\n \n #\n # qyddot\n #\n check2 = qydot*qxddot\n if check2.width() < 1.e-2 and check2.contains(0.):\n check2.inf = 0.\n check2.sup = 0.\n #if qxdot.contains(0.) and abs(qxdot.min.value)>1.e-6:\n # print 'qxdot = ',qxdot\n # print 'qxdot not invertable, implement other logic please'\n if abs(float(qxdot.inf))<1.e-6:\n qxdot.inf = 1.e-10\n print 'invert qxdot'\n print 'qxdot = ', qxdot\n \n #t1 = (lhs + qydot*qxddot)/(qxdot)\n t1 = (lhs + check2)/(qxdot)\n \n t1 = t1 & qyddot # go ahead and shrink t1 to qyddot - they are logically equivalent\n total_ans = []\n useful_indices = []\n bad_indices = []\n for i in range(len(ypts)): \n min_ans = 0.\n for j in range(len(ypts)):\n if j==i:\n pass\n else:\n min_ans = (ypts[j]*float(self.localBasis[2,j])) + min_ans\n min_ans = t1 - min_ans\n if (abs(float(self.localBasis[2,i])) > 0.0):\n min_ans = min_ans/float(self.localBasis[2,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(ypts, total_ans)\n for i in useful_indices:\n if new_ans[i].isempty == False: # abs( new_ans[i].width() ) > 0.:\n ypts[i] = ypts[i] & new_ans[i]\n qxdot,qxddot,qydot,qyddot = self.update_allq(xpts,ypts)\n else:\n print 'warning, possible constraint violation, curvature 1'\n \n ## \n ## qxdot\n ##\n check2 = qydot*qxddot\n if check2.width() < 1.e-2 and check2.contains(0.):\n check2.inf = 0.\n check2.sup = 0.\n #if qyddot.contains(0.):\n # print 'qyddot = ',qyddot\n # print 'qyddot not invertable, implement other logic please'\n \n if qyddot.contains(0.) 
and qyddot.width()<1.e-6:\n qxdot.inf = 0.#1.e-10\n print 'invert qyddot'\n print 'qyddot = ',qyddot\n fix = (lhs + check2)*(1./qyddot)#*(qyddot**-1.)\n fix = fix & qxdot # go ahead and shrink fix to qxdot - they are logically equivalent\n total_ans = []\n useful_indices = []\n bad_indices = []\n \n for i in range(len(xpts)): #contract on x[i]\n min_ans = 0.\n for j in range(len(xpts)): # add up all jth pieces of the dot product except i\n if j==i:\n pass\n else:\n \n min_ans = (xpts[j]*float(self.localBasis[1,j] ) ) + min_ans\n min_ans = fix - min_ans\n if (abs(float(self.localBasis[1,i]) ) >0.0 ):\n min_ans = min_ans/float(self.localBasis[1,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(xpts, total_ans)\n for i in useful_indices:\n if not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n xpts[i] = xpts[i] & new_ans[i]\n qxdot,qxddot,qydot,qyddot = self.update_allq(xpts,ypts)\n else:\n print 'warning, possible constraint violation, curvature 2'\n \n \n ## switch to the other side\n \n ##\n ## contract on qydot\n ##\n check2 = qxdot*qyddot\n if check2.width() < 1.e-2 and check2.contains(0.):\n check2.inf = 0.\n check2.sup = 0.\n# if qxddot.contains(0.):\n# print 'qxddot = ',qxddot\n# print 'qxddot not invertable, implement other logic please'\n# qxddot.min.value = 0.\n if qxddot.contains(0.):\n qxddot.inf = 0.\n \n print 'invert qxddot'\n print 'qxddot = ',qxddot\n t1 = (lhs - check2)/(-qxddot)#*(-qxddot**-1)\n t1 = t1 & qydot\n total_ans = []\n useful_indices = []\n bad_indices = []\n for i in range(len(ypts)): \n min_ans = 0.\n for j in range(len(ypts)):\n if j==i:\n pass\n else:\n #print 't1 = ',t1\n #print 'ypts[{}] = {}'.format(i,ypts[i])\n #print 'localbasis[{},{}] = {}'.format(1,i,self.localBasis[1,j])\n min_ans = (ypts[j]*float(self.localBasis[1,j])) + min_ans\n min_ans = t1 - min_ans\n if (abs(float(self.localBasis[1,i])) > 0.0):\n min_ans = min_ans/float(self.localBasis[1,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(ypts, total_ans)\n for i in useful_indices:\n if not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n ypts[i] = ypts[i] & new_ans[i]\n else:\n print 'warning, possible constraint violation, curvature 3'\n \n ##contract on qxdot\n \n check2 = qxdot*qyddot\n if check2.width() < 1.e-2 and check2.contains(0.):\n check2.inf = 0.\n check2.sup = 0.\n #contract on qxddot\n# if qydot.contains(0.):\n# print 'qydot = ',qxddot\n# print 'qydot not invertable, implement other logic please'\n if qydot.contains(0.):\n qydot.inf = 0.\n print 'invert qydot'\n print 'qydot = ',qydot\n fix = (lhs - qxdot*qyddot)/(-qydot)#*(-qydot**-1)\n fix = fix & qxddot # go ahead and shrink t1 to quddot - they are logically equivalent\n total_ans = []\n useful_indices = []\n bad_indices = []\n for i in range(len(xpts)):\n min_ans = 0.\n for j in range(len(xpts)):\n if j==i:\n pass\n else:\n min_ans = (xpts[j]*float(self.localBasis[2,j] ) ) + min_ans\n min_ans = fix - min_ans\n if (abs(float(self.localBasis[2,i]) ) >0.0 ):\n min_ans = min_ans/float(self.localBasis[2,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(xpts, total_ans)\n for i in useful_indices:\n if not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n xpts[i] = xpts[i] & new_ans[i]\n else:\n print 'warning, possible constraint violation, curvature 4'\n \n for i in range(nrange):\n vertices[0][i].value = 
xpts[i]\n vertices[1][i].value = ypts[i]\n return vertices", "def check_overlapping(self, fit_radius=True, merge=True, mindist='auto', update_geometry=False):\n\n from scipy.spatial.distance import cdist\n from scipy.spatial import cKDTree\n # index = list(self.graph)[:]\n # centers = np.array(list(zip(*nx.get_node_attributes(self.graph,'center').values()))).T\n # pores_radii = np.fromiter(nx.get_node_attributes(self.graph,'radius').values(),dtype=np.float)\n\n pores_radii = list(nx.get_node_attributes(\n self.graph, 'radius').items())\n # we begin by the bigger pores\n pores_radii.sort(key=lambda tup: tup[1], reverse=True)\n index, pores_radii = zip(*pores_radii)\n pores_radii = np.array(pores_radii)\n\n centers = nx.get_node_attributes(self.graph, 'center')\n centers = [np.array(centers[i]) for i in index]\n centers = np.array(centers)\n # distances = cdist(centers,centers)\n kdtree = cKDTree(centers)\n\n stop = False\n\n while not stop:\n\n stop = True\n\n for i, n1 in enumerate(index):\n\n #distances = cdist(centers,[self.graph.nodes[n1]['center']])[:,0]\n\n if self.graph.has_node(n1):\n\n if mindist == 'auto':\n gap = self.graph.nodes[n1]['radius']*0.02\n else:\n gap = mindist\n\n labels = kdtree.query_ball_point(\n self.graph.nodes[n1]['center'], 2.5*self.graph.nodes[n1]['radius'])\n labels.remove(i)\n # distances,labels = kdtree.query(x=net.graph.nodes[n1]['center'],2*self.graph.nodes[n1]['radius'],n_jobs=1)\n # labels.remove(i)\n #distance *= 0.998\n distances = cdist(centers[labels], [self.graph.nodes[n1]['center']])[\n :, 0]*0.998\n d = distances - pores_radii[labels]\n d -= self.graph.nodes[n1]['radius']\n # On commence par la distance la plus faible\n d_and_labels = [(d[j], k) for j, k in enumerate(labels)]\n d_and_labels.sort(key=lambda t: t[0])\n\n for (dist, ind) in d_and_labels:\n\n n2 = index[ind]\n if self.graph.has_node(n2) and self.graph.has_node(n1):\n\n # Le centre du pore né est dans la sphère du pore n1 OU il y a overlapping et fit_radius == False\n # -> Merging ou suppression du pore de plus petit rayon\n if (dist + self.graph.nodes[n2]['radius'] <= gap) or (dist < gap and dist + self.graph.nodes[n2]['radius'] > gap and not fit_radius):\n\n if (self.graph.nodes[n1]['radius'] >= self.graph.nodes[n2]['radius']):\n if merge:\n self.merge_pores(n1, n2)\n print(\"pore\", n1, \"and\", n2,\n \"overlap: merging (deleting\", n2, \")\")\n else:\n self.remove_pore(n2)\n print(\"pore\", n1, \"and\", n2,\n \"overlap: deleting\", n2)\n\n else:\n if merge:\n self.merge_pores(n2, n1)\n print(\"pore\", n1, \"and\", n2,\n \"overlap: merging (deleting\", n1, \")\")\n else:\n self.remove_pore(n1)\n print(\"pore\", n1, \"and\", n2,\n \"overlap: deleting\", n2)\n # On termine l'itération car le pore n1 n'existe plus...\n break\n\n # Overlapping et fit_radius == True\n # 3 options:\n # -Le rayon du pore le plus petit est modifié\n # -Merging\n # -Suppression\n elif dist < gap and dist + self.graph.nodes[n2]['radius'] > gap and fit_radius:\n if (self.graph.nodes[n1]['radius'] >= self.graph.nodes[n2]['radius']):\n r = dist + \\\n self.graph.nodes[n2]['radius'] - \\\n self.graph.nodes[n1]['radius'] - gap\n if self.graph.nodes[n2]['radius'] >= r and r > 0:\n self.graph.nodes[n2]['radius'] = r\n pores_radii[ind] = r\n print(\n \"pore\", n1, \"and\", n2, \"overlap: changin radius of\", n2, \"to\", r)\n else:\n if merge:\n self.merge_pores(n1, n2)\n print(\n \"pore\", n1, \"and\", n2, \"overlap: merging (deleting\", n2, \")\")\n else:\n self.remove_pore(n2)\n print(\"pore\", n1, \"and\", n2,\n 
\"overlap: deleting\", n2)\n else:\n if self.graph.nodes[n1]['radius'] >= dist:\n self.graph.nodes[n1]['radius'] = dist\n pores_radii[i] = dist\n print(\n \"pore\", n1, \"and\", n2, \"overlap: changin radius of\", n1, \"to\", dist)\n else:\n if merge:\n self.merge_pores(n2, n1)\n print(\n \"pore\", n1, \"and\", n2, \"overlap: merging (deleting\", n1, \")\")\n else:\n self.remove_pore(n1)\n print(\"pore\", n1, \"and\", n2,\n \"overlap: deleting\", n1)\n # On termine l'itération car le pore n1 n'existe plus...\n break\n\n if update_geometry:\n self.set_auto_throats_length()\n self.set_auto_throats_radius()", "def _constraints_other(self):\n pass", "def add_corridor_constraint(self,seg,r,weight=1.0):\n\n constraint_type = \"cylinder\"\n params = dict()\n params['x1'] = np.array([ self.qr_polytraj.waypoints['x'][0,seg],\n self.qr_polytraj.waypoints['y'][0,seg],\n self.qr_polytraj.waypoints['z'][0,seg]])\n params['x2'] = np.array([ self.qr_polytraj.waypoints['x'][0,seg+1],\n self.qr_polytraj.waypoints['y'][0,seg+1],\n self.qr_polytraj.waypoints['z'][0,seg+1]])\n params['der'] = 0\n params['l'] = r # Give the same radius buffer on the end caps\n params['r'] = r\n params['weight'] = weight\n params['keep_out'] = False\n params['active_seg'] = seg\n\n\n self.qr_polytraj.add_constraint(constraint_type,params,dynamic_weighting=False,sum_func=False)", "def _constraints_external(self):\n pass", "def __init__(self, minRA, maxRA, minDec, maxDec, radius_RA, radius_Dec):\n\n self.RA = np.mean([minRA, maxRA])\n self.Dec = np.mean([minDec, maxDec])\n self.radius_RA = radius_RA\n self.radius_Dec = radius_Dec\n\n # define the polygon attached to this area\n \"\"\"\n self.area_poly = areap(self.RA-radius_RA/2.,\n self.RA+radius_RA/2.,\n self.Dec-radius_Dec/2.,\n self.Dec+radius_Dec/2.)\n \"\"\"\n self.area_poly = areap(minRA, maxRA, minDec, maxDec)\n all_patches = self.getpatches(minRA, maxRA, minDec, maxDec)\n\n self.patches = self.inside(all_patches)", "def _constraints_utility(self):\n\n def rule(model):\n total = summation(self.utilities, model.A)\n return model.A_total == total\n\n self.model.constrain_A_total = Constraint(rule=rule)\n\n def rule(model):\n total = 2 * summation(self.utilities, model.A2)\n return model.A2_total == total\n\n self.model.constrain_A2_total = Constraint(rule=rule)\n\n def rule(model):\n total = 3 * summation(self.utilities, model.A3)\n return model.A3_total == total\n\n self.model.constrain_A3_total = Constraint(rule=rule)\n\n def rule(model):\n total = 4 * summation(self.utilities, model.A4)\n return model.A4_total == total\n\n self.model.constrain_A4_total = Constraint(rule=rule)\n\n def rule(model):\n completion_bonus = self.task_completion_bonus * self.task_duration\n total = summation(completion_bonus, model.T_total)\n return model.Completion_total == total\n\n self.model.constrain_completion_total = Constraint(rule=rule)\n\n def rule(model):\n scaling = 0.2\n affinity = np.outer(c.AFFINITY_COGNITIVE, self.task_cognitive_load)\n\n # TODO(cathywu) replace this code when \"simple slicing\" is clarified\n zeros1 = np.zeros((1, self.num_tasks))\n zeros2 = np.zeros((2, self.num_tasks))\n zeros3 = np.zeros((3, self.num_tasks))\n\n total = summation(affinity, model.A)\n total += summation(affinity, model.A2)\n total += summation(affinity, model.A3)\n total += summation(affinity, model.A4)\n\n total += summation(np.vstack((affinity[1:, :], zeros1)), model.A2)\n total += summation(np.vstack((affinity[1:, :], zeros1)), model.A3)\n total += summation(np.vstack((affinity[1:, :], 
zeros1)), model.A4)\n\n total += summation(np.vstack((affinity[2:, :], zeros2)), model.A3)\n total += summation(np.vstack((affinity[2:, :], zeros2)), model.A4)\n\n total += summation(np.vstack((affinity[3:, :], zeros3)), model.A4)\n total *= scaling\n\n return model.Affinity_cognitive_total == total\n\n self.model.constrain_affinity_cognitive_total = Constraint(rule=rule)", "def horizontal_radius(self):\n raise NotImplementedError", "def __init__(self,r):\n self.radius = r\n self.uc_centered_a = r\n self.uc_centered_b = r*np.sqrt(3.0)", "def radius(self,xc=None,yc=None):\n if xc == None:\n xc = self.x1\n if yc == None:\n yc = self.y1\n self.r = sqrt((self.x-xc)**2+(self.y-yc)**2)", "def define_potential(self) -> hoomd.md.pair.pair:\n self.potential_args.setdefault('r_cut', 2.5)\n potential = self.potential(\n **self.potential_args,\n nlist=hoomd.md.nlist.cell()\n )\n for i, j in combinations_with_replacement(self._radii.keys(), 2):\n potential.pair_coeff.set(i, j, epsilon=1, sigma=self._radii[i] + self._radii[j])\n return potential", "def __init__(self, radius):\n self.radius = radius", "def build_constraints_boundaries(self):\n\n # Trapezoidal and Hermite-Simpson methods can't compute\n # defects at the last node contrary to pseudospectral methods\n coll_method = self.options['tr_method'] in [\n 'trapezoidal', 'hermite-simpson']\n n_nodes = self.problem.prm['n_nodes'] - \\\n 1 if coll_method else self.problem.prm['n_nodes']\n\n # Defects lower and upper boundaries\n defects_low = np.zeros(\n self.problem.prm['n_states'] * n_nodes)\n defects_upp = np.zeros(\n self.problem.prm['n_states'] * n_nodes)\n\n # Path lower and upper boundaries\n path_low = np.hstack([self.problem.low_bnd.path]\n * (self.problem.prm['n_nodes']))\n path_upp = np.hstack([self.problem.upp_bnd.path]\n * (self.problem.prm['n_nodes']))\n\n # Events lower and upper boundaries\n event_low = self.problem.low_bnd.event\n event_upp = self.problem.upp_bnd.event\n\n # Assembly of the lower and upper boundaries vectors\n low = np.concatenate((defects_low, path_low, event_low))\n upp = np.concatenate((defects_upp, path_upp, event_upp))\n\n return low, upp", "def fix_curvature(self) -> None:\n self.n1.fix = True\n self.n2.fix = True", "def get_radius(self):\r\n return 1", "def constraints(self):\n constraints = np.concatenate( (np.ravel(self.noise_var_constraint), \n self.kern.constraints), axis=0)\n return constraints", "def __addValueConstraints(self):\n for x in range(self.width):\n for y in range(self.height):\n g = self.grid[(x, y)]\n self.solver.add(\n Or([g == Magnets.EMPTY, g == Magnets.PLUS, g == Magnets.MINUS]))\n if x > 0:\n left = self.grid[(x-1, y)]\n self.solver.add(Or([g != left, g == Magnets.EMPTY]))\n if y > 0:\n up = self.grid[(x, y-1)]\n self.solver.add(Or([g != up, g == Magnets.EMPTY]))", "def __init__(self, constraint):\n self.__filter__ = []\n if constraint.get('range', None):\n constraint_range = constraint['range']\n range_start = constraint_range.get('start', None)\n range_end = constraint_range.get('end', None)\n if range_start:\n self.__filter__.append(lambda x: x >= float(range_start))\n if range_end:\n self.__filter__.append(lambda x: x <= float(range_end))\n if bool(constraint.get('unique', None)):\n self.__seen__ = {}\n\n def unique_check(x):\n if self.__seen__.get(x, None) != None:\n return False\n else:\n self.__seen__[x] = True\n return True\n self.__filter__.append(unique_check)\n self.__allow_null__ = bool(constraint.get('allow_null', None))", "def validation(self):\r\n\r\n if self.__radius <= 
0:\r\n raise ValueError(\"the input radius must be a positive number\")", "def rigid_rings(self):\n raise NotImplementedError", "def constrain_pars(model_info, pars):\n name = model_info['id']\n # if it is a product model, then just look at the form factor since\n # none of the structure factors need any constraints.\n if '*' in name:\n name = name.split('*')[0]\n\n if name == 'capped_cylinder' and pars['cap_radius'] < pars['radius']:\n pars['radius'], pars['cap_radius'] = pars['cap_radius'], pars['radius']\n if name == 'barbell' and pars['bell_radius'] < pars['radius']:\n pars['radius'], pars['bell_radius'] = pars['bell_radius'], pars['radius']\n\n # Limit guinier to an Rg such that Iq > 1e-30 (single precision cutoff)\n if name == 'guinier':\n #q_max = 0.2 # mid q maximum\n q_max = 1.0 # high q maximum\n rg_max = np.sqrt(90*np.log(10) + 3*np.log(pars['scale']))/q_max\n pars['rg'] = min(pars['rg'], rg_max)\n\n if name == 'rpa':\n # Make sure phi sums to 1.0\n if pars['case_num'] < 2:\n pars['Phia'] = 0.\n pars['Phib'] = 0.\n elif pars['case_num'] < 5:\n pars['Phia'] = 0.\n total = sum(pars['Phi'+c] for c in 'abcd')\n for c in 'abcd':\n pars['Phi'+c] /= total", "def fill_swh(self,radius):\n self.swh_fill = gs.geo_convolve(self.swh,self,\n radius,[0.0,10.0],mask=False)*self.mask\n \n self.swh_fill[self.lats.T>88] = np.nan\n self.swh_fill[self.lats.T<60] = np.nan\n # also convolve the weights\n w_fill = gs.geo_convolve(self.total_w*self.swh_mask,self,\n radius/2,[0.0,10.0],mask=False)\n w_mask = np.ones_like(w_fill)\n w_mask[np.isnan(w_fill)] = np.nan\n w_mask[w_fill<2.0] = np.nan\n self.swh_fill_mask = w_mask\n self.swh_fill = self.swh_fill*w_mask", "def SetPRCatConstraint(self, model ) :\n tot = np.multiply(self.wish, self.dispo)\n for line in tot :\n for val in line :\n if not val : continue\n if self.bound>0 : model += val <= self.valBound\n elif self.bound<0 : model += val >= self.valBound", "def monotonic_contractor(self, *args, **kwargs):\n vertices = copy.deepcopy(args[0])\n nrange = len(vertices[0])\n xpts = []\n ypts = []\n for i in range(nrange):\n xpts.append(vertices[0][i].value)\n xpts.append(vertices[1][i].value)\n constraint = copy.deepcopy(args[1])\n \n \n # compute automatic differentiated curvature:\n qxdot = np.dot(xpts,self.localBasis[1,:])\n qxddot = np.dot(xpts,self.localBasis[2,:])\n qydot = np.dot(ypts,self.localBasis[1,:])\n qyddot = np.dot(ypts,self.localBasis[2,:]) \n #computation of doubledots is expanded below\n \n \n ## the all important computation split (need to abstract this kind of thing)\n ##lhs = ((np.sqrt(qxdot*qxdot + qydot*qydot) )**3. )*constraint\n lhs = (np.sqrt(qxdot**2 + qydot**2)**3.) 
*constraint\n \n # check2 = qxdot*qyddot\n # if check2.width() < 1.e-2:\n # check2.min.value = check2.real.value\n # check2.max.value = check2.real.value\n # t1 = (lhs - check2)/qydot\n \n #\n # qyddot\n #\n check2 = qydot*qxddot\n #if check2.width() < 1.e-2:\n # check2.min.value = check2.real.value\n # check2.max.value = check2.real.value\n if qxdot.contains(0.):\n print 'qxdot = ',qxdot\n print 'qxdot not invertable, implement other logic please'\n else:\n print 'invert qxdot'\n print 'qxdot = ', qxdot\n t1 = (lhs + qydot*qxddot)/(qxdot)#*(qxdot**-1.)\n t1 = t1 & qyddot # go ahead and shrink t1 to qyddot - they are logically equivalent\n total_ans = []\n useful_indices = []\n bad_indices = []\n for i in range(len(ypts)): \n min_ans = 0.\n for j in range(len(ypts)):\n if j==i:\n pass\n else:\n min_ans = (t1 - ypts[j]*float(self.localBasis[2,j])) + min_ans\n if (abs(float(self.localBasis[2,i])) > 0.0):\n min_ans = min_ans/float(self.localBasis[2,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n new_ans = vector_AND_(ypts, total_ans)\n for i in useful_indices:\n if not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n ypts[i] = ypts[i] & new_ans[i]\n \n ## \n ## qxdot\n ##\n \n if qyddot.contains(0.):\n print 'qyddot = ',qyddot\n print 'qyddot not invertable, implement other logic please'\n else:\n print 'invert qyddot'\n print 'qyddot = ',qyddot\n fix = (lhs + qydot*qxddot)/(qyddot)#*(qyddot**-1.)\n fix = fix & qxdot # go ahead and shrink fix to qxdot - they are logically equivalent\n total_ans = []\n useful_indices = []\n bad_indices = []\n \n for i in range(len(xpts)): #contract on x[i]\n min_ans = 0.\n for j in range(len(xpts)): # add up all jth pieces of the dot product except i\n if j==i:\n pass\n else:\n \n min_ans = (fix - xpts[j]*float(self.localBasis[1,j] ) ) + min_ans\n if (abs(float(self.localBasis[1,i]) ) >0.0 ):\n min_ans = min_ans/float(self.localBasis[1,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(xpts, total_ans)\n for i in useful_indices:\n if not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n xpts[i] = xpts[i] & new_ans[i]\n \n \n ## switch to the other side\n \n ##\n ## contract on qydot\n ##\n check2 = qxdot*qyddot\n #if check2.width() < 1.e-2:\n # check2.min.value = check2.real.value\n # check2.max.value = check2.real.value\n if qxddot.contains(0.):\n print 'qxddot = ',qxddot\n print 'qxddot not invertable, implement other logic please'\n else:\n print 'invert qxddot'\n print 'qxddot = ',qxddot\n t1 = (lhs - qxdot*qyddot)/(-qxddot)#*(-qxddot**-1)\n t1 = t1 & qydot\n total_ans = []\n useful_indices = []\n bad_indices = []\n for i in range(len(ypts)): \n min_ans = 0.\n for j in range(len(ypts)):\n if j==i:\n pass\n else:\n #print 't1 = ',t1\n #print 'ypts[{}] = {}'.format(i,ypts[i])\n #print 'localbasis[{},{}] = {}'.format(1,i,self.localBasis[1,j])\n min_ans = (t1 - ypts[j]*float(self.localBasis[1,j])) + min_ans\n if (abs(float(self.localBasis[1,i])) > 0.0):\n min_ans = min_ans/float(self.localBasis[1,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(ypts, total_ans)\n for i in useful_indices:\n if not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n ypts[i] = ypts[i] & new_ans[i]\n \n ##contract on qxdot\n \n \n #contract on qxddot\n if qydot.contains(0.):\n print 'qydot = ',qxddot\n print 'qydot not invertable, implement other logic please'\n else:\n print 'invert 
qydot'\n print 'qydot = ',qydot\n fix = (lhs - qxdot*qyddot)/(-qydot)#*(-qydot**-1)\n fix = fix & qxddot # go ahead and shrink t1 to quddot - they are logically equivalent\n total_ans = []\n useful_indices = []\n bad_indices = []\n for i in range(len(xpts)):\n min_ans = 0.\n for j in range(len(xpts)):\n if j==i:\n pass\n else:\n \n min_ans = (fix - xpts[j]*float(self.localBasis[2,j] ) ) + min_ans\n if (abs(float(self.localBasis[2,i]) ) >0.0 ):\n min_ans = min_ans/float(self.localBasis[2,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(xpts, total_ans)\n for i in useful_indices:\n if not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n xpts[i] = xpts[i] & new_ans[i]\n \n \n for i in range(nrange):\n vertices[0][i].value = xpts[i]\n vertices[1][i].value = ypts[i]\n \n return vertices", "def constrain_roi(self, frame):\n raise NotImplementedError", "def defineCircleLayout(self):\n # Define a 2-D array representing the position of each mesh point\n self.xPoints = self.frange(0,self.R,self.h)\n self.yPoints = self.frange(0,self.R,self.h)\n\n # Position of internal mesh points\n internal_xyCoord = [(i,j) for i in self.xPoints for j in self.yPoints if (i - self.R)**2 + (j - self.R)**2 < self.R^2] \n\n # Define the dictionary containing internal points\n for k in internal_xyCoord:\n x = k[0]\n y = k[1]\n xLabel = xPoints.index(x)\n yLabel = yPoints.index(y)\n self.internalPoints[(xLabel,yLabel)] = meshPoint(type = 'internal',x = x, y = y, xLabel = xLabel, yLabel = yLabel) \n\n # Position of the boundary points\n # Find the intersection of each mesh line with the circle\n # For a given vertical mesh line: \n # y = R - sqrt(R^2 - (x-R)^2) & y = R + sqrt(R^2 - (x-R)^2)\n # For a given horizontal mesh line: \n # x = R - sqrt(R^2 - (y-R)^2) & x = R + sqrt(R^2 - (y-R)^2)\n boundary_xyCoord = [(0,self.R),(self.R,0),(self.R,2*self.R),(2*self.R,self.R)] + [(x,self.R - math.sqrt(self.R**2 - (x-self.R)**2)) for x in self.xPoints[1:len(self.xPoints)-1]] + [(x,self.R - math.sqrt(self.R**2 + (x-self.R)**2)) for x in self.xPoints[1:len(self.xPoints)-1]] + [(self.R - math.sqrt(self.R**2 - (y-self.R)**2),y) for y in self.yPoints[1:len(yPoints)-1]] + [(self.R + math.sqrt(self.R**2 - (y-self.R)**2),y) for y in self.yPoints[1:len(yPoints)-1]] \n\n # Define the dictionary containing boundary points\n for k in boundary_xyCoord:\n x = k[0]\n y = k[1]\n [xLabel,yLabel] = self.findLabel(x,y)\n self.boundaryPoints[(xLabel,yLabel)] = meshPoint(type = 'boundary',x = x, y = y, xLabel = xLabel, yLabel = yLabel) \n \n # Now that we have assigned the labels we can define fE, fW, fN and fS\n self.fCalc()", "def scale_arc_constraints(blk):\n for arc in blk.component_data_objects(Arc, descend_into=True):\n arc_block = arc.expanded_block\n if arc_block is None: # arc not expanded or port empty?\n _log.warning(\n f\"{arc} has no constraints. Has the Arc expansion transform \"\n \"been applied?\"\n )\n continue\n warning = (\n \"Automatic scaling for arc constraints is supported for \"\n \"only the Equality rule. 
Variable {name} on Port {port} was \"\n \"created with a different rule, so the corresponding constraint \"\n \"on {arc_name} will not be scaled.\"\n )\n port1 = arc.ports[0]\n port2 = arc.ports[1]\n for name in port1.vars.keys():\n if not port1.is_equality(name):\n _log.warning(\n warning.format(name=name, port=port1.name, arc_name=arc.name)\n )\n continue\n if not port2.is_equality(name):\n _log.warning(\n warning.format(name=name, port=port2.name, arc_name=arc.name)\n )\n continue\n con = getattr(arc_block, name + \"_equality\")\n for i, c in con.items():\n if i is None:\n sf = min_scaling_factor([port1.vars[name], port2.vars[name]])\n else:\n sf = min_scaling_factor([port1.vars[name][i], port2.vars[name][i]])\n constraint_scaling_transform(c, sf)", "def __init__( self , center , radius ):\r\n self.center = center\r\n self.radius = radius", "def constrain(self):\n\n if self.rect.right < self.limit_left:\n self.limit_left = 0\n\n if self.rect.right > self.limit_right:\n self.limit_right = self.limit_right\n\n if self.rect.top <= 0:\n self.rect.top = 0\n\n if self.rect.bottom >= SCREEN_HEIGHT:\n self.rect.bottom = SCREEN_HEIGHT\n self.v_y = 0", "def __init__(self, constraints={}):\n self.constraints = constraints", "def force_bounds(self):\n return self._min_force, self._max_force", "def geometryConstraint(*args, layer: AnyStr=\"\", name: Union[AnyStr, bool]=\"\", remove: bool=True,\n targetList: bool=True, weight: Union[float, bool]=0.0, weightAliasList:\n bool=True, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[List[AnyStr], Any]:\n pass", "def _constraints_nonoverlapping_tasks(self):\n\n def rule(model, i):\n total = sum(model.A[i, j] for j in model.tasks)\n total += sum(model.A2[i, j] for j in model.tasks)\n total += sum(model.A3[i, j] for j in model.tasks)\n total += sum(model.A4[i, j] for j in model.tasks)\n if i > 0:\n total += sum(model.A2[i - 1, j] for j in model.tasks)\n total += sum(model.A3[i - 1, j] for j in model.tasks)\n total += sum(model.A4[i - 1, j] for j in model.tasks)\n if i > 1:\n total += sum(model.A3[i - 2, j] for j in model.tasks)\n total += sum(model.A4[i - 2, j] for j in model.tasks)\n if i > 2:\n total += sum(model.A4[i - 3, j] for j in model.tasks)\n return 0, total, 1\n\n self.model.constrain_nonoverlapping = Constraint(self.model.timeslots,\n rule=rule)", "def dp_radius(self, s, survey='SPIRE_500'):\n shape = np.array(s[survey].shape)\n cosPA, sinPA = np.cos(s['PA_RAD']), np.sin(s['PA_RAD'])\n cosINCL = s['cosINCL']\n w = s[survey + '_WCS']\n xcm, ycm = s['RA_RAD'], s['DEC_RAD']\n dp_coords = np.zeros([shape[0], shape[1], 2])\n # Original coordinate is (y, x)\n # :1 --> x, RA --> the one needed to be divided by cos(incl)\n # :0 --> y, Dec\n dp_coords[:, :, 0], dp_coords[:, :, 1] = \\\n np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))\n # Now, value inside dp_coords is (x, y)\n # :0 --> x, RA --> the one needed to be divided by cos(incl)\n # :1 --> y, Dec\n for i in range(shape[0]):\n dp_coords[i] = Angle(w.wcs_pix2world(dp_coords[i], 1) * u.deg).rad\n dp_coords[:, :, 0] = 0.5 * (dp_coords[:, :, 0] - xcm) * \\\n (np.cos(dp_coords[:, :, 1]) + np.cos(ycm))\n dp_coords[:, :, 1] -= ycm\n # Now, dp_coords is (dx, dy) in the original coordinate\n # cosPA*dy-sinPA*dx is new y\n # cosPA*dx+sinPA*dy is new x\n if survey[:5] == 'GALEX':\n return np.sqrt((cosPA * dp_coords[:, :, 1] +\n sinPA * dp_coords[:, :, 0])**2 +\n ((cosPA * dp_coords[:, :, 0] -\n sinPA * dp_coords[:, :, 1]))**2) * \\\n s['DIST_MPC'] * 1.0E3 # Radius in kpc\n else:\n return 
np.sqrt((cosPA * dp_coords[:, :, 1] +\n sinPA * dp_coords[:, :, 0])**2 +\n ((cosPA * dp_coords[:, :, 0] -\n sinPA * dp_coords[:, :, 1]) / cosINCL)**2) * \\\n s['DIST_MPC'] * 1.0E3 # Radius in kpc", "def get_constraints(self):\n\n return vertcat(*self.g), self.g_min, self.g_max", "def circle_radius(self):\n return min([self.container.width, self.container.height]) / 4", "def setupPhysicalBounds(self):\n \n ### 2018-05-06 WIC - **do not** enforce +/- pi limits on the\n ### angles here.\n self.boundsPhysLo = np.array(\\\n [0.00, 0.00, 0., 0., -np.inf, -np.inf,-np.inf,0 ] )\n self.boundsPhysHi = np.array(\\\n [np.inf, np.inf, 1., np.inf, np.inf, np.inf,np.inf, np.inf ] )", "def initialize(self):\n super(CircTab,self).initialize()\n self.radius = 2\n # set x and y scales for the circle size and use checkXPos and\n # checkYPos instead of updateSize\n self.initialXScale.config(from_=-self.radius, to=self.radius,\n command=self.checkXPos,resolution=0.01)\n self.initialYScale.config(from_=-self.radius, to=self.radius,\n command=self.checkYPos,resolution=0.01)", "def _constraints_variables(self):\n\n def rule(model, k):\n \"\"\"\n Total slots allocated to category k\n \"\"\"\n ind_i = model.timeslots\n ind_i2 = model.timeslots2\n ind_i3 = model.timeslots3\n ind_i4 = model.timeslots4\n ind_j = model.tasks\n cat_k_total = sum(\n model.A[i, j] * self.task_category[j, k] for i in ind_i for j in\n ind_j)\n cat_k_total += 2 * sum(\n model.A2[i, j] * self.task_category[j, k] for i in ind_i2 for j\n in ind_j)\n cat_k_total += 3 * sum(\n model.A3[i, j] * self.task_category[j, k] for i in ind_i3 for j\n in ind_j)\n cat_k_total += 4 * sum(\n model.A4[i, j] * self.task_category[j, k] for i in ind_i4 for j\n in ind_j)\n return model.C_total[k] == cat_k_total\n\n self.model.constrain_cat_duration0 = Constraint(self.model.categories,\n rule=rule)\n\n def rule(model, s, k):\n \"\"\"\n S_cat[s,k] = whether (any tasks of) category k is assigned on day s\n \"\"\"\n den = sum(self.task_category[:, k])\n ind_j = model.tasks\n total = sum(self.task_category[j, k] * model.S[s, j] for j in\n ind_j) / den\n # Desired: S[i,j] = ceil(total)\n # Desired: S[i,j] = 0 if total <= 0; otherwise, S[i,j] = 1\n return -EPS, model.S_cat[s, k] - total, 1 - EPS\n\n self.model.constrain_cat_days0 = Constraint(self.model.dayslots,\n self.model.categories,\n rule=rule)\n\n def rule(model, k):\n \"\"\"\n S_cat_total[k] = number of unique days in which task from\n category k were assigned\n\n More precisely:\n sum_s S_cat[s,k] == S_cat_total[k]\n \"\"\"\n ind_s = model.dayslots\n total = sum(model.S_cat[s, k] for s in ind_s)\n return model.S_cat_total[k] == total\n\n self.model.constrain_cat_days1 = Constraint(self.model.categories,\n rule=rule)", "def removeBounded(self, bounds):\n if bounds==None or len(bounds)!=4:\n return\n x1,y1,x2,y2 = bounds\n if x1>x2 :\n temp=x1;x1=x2;x2=temp\n if y1>y2:\n temp=y1;y1=y2;y2=temp\n lst=[]\n for i in range(0,self.length()):\n x=self.x[i]; y=self.y[i]\n if (x>x1 and x<x2) and (y>y1 and y<y2): \n lst.append(i)\n self.removeMultiple(lst)\n return", "def __set_mask_regions(self):\n self.bottom_clip = np.int32(np.int32([[[60,0], [1179,0], [1179,650], [60,650]]]))\n self.roi_clip = np.int32(np.int32([[[640, 425], [1179,550], [979,719],\n [299,719], [100, 550], [640, 425]]]))", "def boundary_conditions(self):\n ce = 2 * self.dy * self.g * self.mu * self.m_u / self.kb\n self.e[0, :] = (4 * self.e[1, :] - self.e[2, :]) / (\n ce / self.T[0, :] + 3\n )\n self.rho[0, :] = (\n self.e[0, :]\n * (self.Y - 1)\n * 
self.mu\n * self.m_u\n / (self.kb * self.T[0, :])\n )\n self.u[0, :] = (4 * self.u[1, :] - self.u[2, :]) / 3\n self.w[0, :] = 0\n\n self.e[-1, :] = (4 * self.e[-2, :] - self.e[-3, :]) / (\n 3 - ce / self.T[-1, :]\n )\n self.rho[-1, :] = (\n self.e[-1, :]\n * (self.Y - 1)\n * self.mu\n * self.m_u\n / (self.kb * self.T[-1, :])\n )\n self.u[-1, :] = (4 * self.u[-2, :] - self.u[-3, :]) / 3\n self.w[-1, :] = 0", "def _compute_bounds(self, axis, view):\n return None", "def edge_check(self, person_radius=7):\n if self.position.x > self.width - person_radius and self.velocity.x > 0: # Check right bounds\n self.velocity = Vector(-self.velocity.x, self.velocity.y) \n elif self.position.x < person_radius and self.velocity.x < 0: # Check left bounds\n self.velocity = Vector(-self.velocity.x, self.velocity.y) \n \n if self.position.y > self.height - person_radius and self.velocity.y > 0: # Check bottom bounds\n self.velocity = Vector(self.velocity.x, -self.velocity.y) \n elif self.position.y < person_radius and self.velocity.y < 0: # Check top bounds\n self.velocity = Vector(self.velocity.x, -self.velocity.y)", "def stricter_radius(radius1, radius2):\n if radius1 > radius2:\n return radius2\n return radius1", "def gripStretchCircle(circle, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve):\n newCenter = QgsPoint(circle.center)\n newRadius = circle.radius\n \n for ptToStretch in ptListToStretch:\n if qad_utils.ptNear(ptToStretch, circle.center): # se i punti sono sufficientemente vicini\n newCenter.set(circle.center.x() + offSetX, circle.center.y() + offSetY)\n elif circle.isPtOnCircle(ptToStretch):\n newPt = QgsPoint(basePt.x() + offSetX, basePt.y() + offSetY)\n newRadius = qad_utils.getDistance(circle.center, newPt)\n\n newCircle = qad_circle.QadCircle()\n if newCircle.set(newCenter, newRadius) == False:\n return None\n \n return newCircle", "def settings_outside_clinical_bounds(cir, isf, sbr):\n return (\n (float(isf) < 10)\n | (float(isf) > 500)\n | (float(cir) < 2)\n | (float(cir) > 150)\n | (float(sbr) < 0.05)\n | (float(sbr) > 30)\n )", "def kick_from_eccentric_tides(self):\n\n inv_r_7 = self.inv_r**7\n r0_5 = self.pair[0].radius ** 5\n r1_5 = self.pair[1].radius ** 5\n m0_2 = self.pair[0].mass * self.pair[0].mass\n m1_2 = self.pair[1].mass * self.pair[1].mass\n\n ftr = -3.0 * inv_r_7 * constants.G * ((m1_2 * r0_5 * self.pair[0].kaps + m0_2 * r1_5 * self.pair[1].kaps) + # Non-dissipative\n 3 * self.inv_r * self.rdot_mag *\n (m1_2 * r0_5 * self.pair[0].kaps * self.pair[0].taulag\n + m0_2 * r1_5 * self.pair[1].kaps * self.pair[1].taulag)) # Dissipative\n\n hutforce = ftr * self.inv_r * self.pos\n kick0 = (1.0 / self.pair[0].mass) * hutforce * self.dt\n kick1 = (-1.0 / self.pair[1].mass) * hutforce * self.dt\n\n self.pair[0].velocity += kick0\n self.pair[1].velocity += kick1", "def restrict(self):\n\n cg = self.grid.coarse_like(2)\n\n c_edge_coeffs = EdgeCoeffs(cg, None, empty=True)\n\n c_eta_x = cg.scratch_array()\n c_eta_y = cg.scratch_array()\n\n fg = self.grid\n\n c_eta_x[cg.ilo:cg.ihi+2,cg.jlo:cg.jhi+1] = \\\n 0.5*(self.x[fg.ilo:fg.ihi+2:2,fg.jlo :fg.jhi+1:2] +\n self.x[fg.ilo:fg.ihi+2:2,fg.jlo+1:fg.jhi+1:2])\n\n # redo the normalization\n c_edge_coeffs.x = c_eta_x*fg.dx**2/cg.dx**2\n\n c_eta_y[cg.ilo:cg.ihi+1,cg.jlo:cg.jhi+2] = \\\n 0.5*(self.y[fg.ilo :fg.ihi+1:2,fg.jlo:fg.jhi+2:2] +\n self.y[fg.ilo+1:fg.ihi+1:2,fg.jlo:fg.jhi+2:2])\n\n c_edge_coeffs.y = c_eta_y*fg.dy**2/cg.dy**2\n\n return c_edge_coeffs", "def 
circle_mask(radius,size=None,offset=None,inner=0,subsample_limit=4,center=False):\n def subsample(x,y,sz,r,lim):\n d = np.hypot(x, y)\n if lim==0: #hit recursion limit\n #return area if x,y is inside circle\n return sz**2 if d < r else 0.0\n elif d + 0.70711*sz < r: #totally inside circle\n return sz**2\n elif d - 0.70711*sz > r: #totally outside circle\n return 0.0\n else: #on edge, recurse into quadrants\n s,o = sz/2, sz/4\n return subsample(x+o,y+o,s,r,lim-1) + \\\n subsample(x+o,y-o,s,r,lim-1) + \\\n subsample(x-o,y-o,s,r,lim-1) + \\\n subsample(x-o,y+o,s,r,lim-1)\n if offset is None:\n y0,x0 = 0,0\n else:\n y0,x0 = offset\n if size is None:\n size=2*radius+1\n if np.isscalar(size):\n size = (size,size)\n if center:\n y0 += 0.5*size[0]-0.5-radius\n x0 += 0.5*size[1]-0.5-radius\n coeffs = np.empty(size)\n for r in range(size[0]):\n for c in range(size[1]):\n x,y = c-radius,r-radius\n coeffs[r,c] = subsample(x-x0,y-y0,1,radius,subsample_limit)\n if inner > 0: \n coeffs[r,c] -= subsample(x-x0,y-y0,1,inner,subsample_limit) \n return coeffs", "def eggleton_roche_radius(self):\n return self.eggleton_roche_over_separation() * self.separation()", "def _fcn_minmax_roi(self):\n self.roi._update_cbar_minmax()\n self.cbqt.cbobjs._objs['roi']['clim'] = self.roi._clim\n kwargs = self.cbqt.cbobjs._objs['roi'].to_kwargs(True)\n self.roi.update_from_dict(kwargs)\n self.roi._update_cbar()", "def create_keep_in_constraint(self,der=2,limit=1e1,weight=1e5):\n print(\"Creating Keep in constraint\")\n constr = dict()\n constr['constraint_type'] = \"ellipsoid\"\n constr['weight'] = self.accel_weight\n constr['keep_out'] = False\n constr['der'] = der\n constr['x0'] = np.zeros(3)\n A = np.matrix(np.identity(3))\n limit = self.accel_lim\n A[0,0] = 1/limit**2\n A[1,1] = 1/limit**2\n A[2,2] = 1/limit**2\n constr['rot_mat'] = np.identity(3)\n constr['A'] = A\n\n\n self.qr_polytraj.add_constraint(constr['constraint_type'],constr,dynamic_weighting=False,sum_func=False)\n\n # self.qr_polytraj.run_astro()\n # self.update_path_markers()\n # acc_wp = self.get_accel_at_waypoints(\"main\")\n # self.interactive_marker_worker.make_controls(self.qr_polytraj.waypoints)\n # self.interactive_marker_worker.update_controls(self.qr_polytraj.waypoints,acc_wp = acc_wp)", "def circleMask(img, cir_x, cir_y, r, mode, filter=0):\n\n if not mode == 'interior' and not mode == 'exterior':\n print(mode, \"is not a supported mode. 
Please enter interior or exterior\")\n return 1\n\n #get the dimensions of the image\n n,m = img.shape\n\n #create an open grid for our image\n y,x = np.ogrid[0:n, 0:m]\n #operate on a copy of the image\n copyImg = img.copy()\n\n #get the x and y center points of our image\n center_x = cir_x\n center_y = cir_y\n\n #create a circle mask\n if mode == 'interior':\n circle_mask = (x-center_x)**2 + (y-center_y)**2 <= r**2\n elif mode == 'exterior':\n circle_mask = (x-center_x)**2 + (y-center_y)**2 >= r**2\n\n #black out anywhere within the circle mask\n copyImg[circle_mask] = [filter]\n copyImg[copyImg != filter] = [255-filter]\n\n return copyImg", "def condition_bounds(self) -> Tuple[float, float]:\n raise NotImplementedError", "def boundary_conditions(self):\n pass", "def __init__(self, nside, weights_map, lmax_factor=1.5, mask=None):\n self.params = sc.Parameters(nside, lmax_factor)\n self.weights_map = weights_map\n if mask is not None:\n self.mask = sc.Mask(mask, nside)\n else:\n self.mask = sc.Mask(np.ones(self.params.npix))", "def radius(self) -> int:\n pass", "def canvas_bounds(self) -> utils.BoxRegion:", "def remaining_constraints(self):\r\n \r\n def iec1(state,decision,nodes):\r\n return decision['E:L']+decision['E:R_1']<=nodes['E'].get_preds_value(state)\r\n def iec2(state,decision,nodes):\r\n return decision['R_1:L']<=nodes['R_1'].get_preds_value(state)\r\n def iec3(state,decision,nodes):\r\n return decision['G:R_1']>=-(nodes['R_1'].get_preds_value(state)) \r\n def iec4(state,decision,nodes):\r\n return decision['G:L']>=0.0\r\n def iec5(state,decision,nodes):\r\n return decision['E:L']>=0.0\r\n def iec6(state,decision,nodes):\r\n return decision['E:R_1']>=0.0\r\n def iec7(state,decision,nodes):\r\n return decision['R_1:L']>=0.0\r\n\r\n Inequality_Constraints=[iec1,iec2,iec3,iec4,iec5,iec6,iec7]\r\n \r\n return Inequality_Constraints", "def set_radius(self, radius):\n self._radius = radius\n self._reset_slot_bounds()", "def _init_optimizer_bounds(self):\n bounds = []\n for filt in self.filters:\n if filt.optimize_fc:\n bounds.append((np.log10(filt.min_fc), np.log10(filt.max_fc)))\n if filt.optimize_q:\n bounds.append((filt.min_q, filt.max_q))\n if filt.optimize_gain:\n bounds.append((filt.min_gain, filt.max_gain))\n return bounds", "def constrain(v2,w,h):\n if v2.x > w:\n v2.x = w\n if v2.x < 0:\n v2.x = 0 \n if v2.y > h:\n v2.y = h\n if v2.y < 0:\n v2.y = 0\n return v2", "def _set_constraint(self):\n pass", "def sector_mask(shape,centre,radius1, radius2 = 0, angle_range = (0, np.pi)):\n\n x,y = np.ogrid[:shape[0],:shape[1]]\n cx,cy = centre\n tmin,tmax = angle_range\n\n # ensure stop angle > start angle\n if tmax < tmin:\n tmax += 2*np.pi\n\n # convert cartesian --> polar coordinates\n r2 = (x-cx)*(x-cx) + (y-cy)*(y-cy)\n theta = np.arctan2(x-cx,y-cy) - tmin\n\n # wrap angles between 0 and 2*pi\n theta %= (2*np.pi)\n\n # circular mask\n circmask = (r2 <= radius1*radius1) & (r2 >= radius2*radius2)\n\n # angular mask\n anglemask = theta <= (tmax-tmin)\n\n return circmask*anglemask", "def make_tevcat_exclusion_mask():\n\n # TODO: make this a method ExclusionMask.from_catalog()?\n from gammapy.catalog import load_catalog_tevcat\n\n tevcat = load_catalog_tevcat()\n all_sky_exclusion = ExclusionMask.empty(nxpix=3600, nypix=1800, binsz=0.1,\n fill=1, dtype='int')\n val_lon, val_lat = all_sky_exclusion.coordinates()\n lons = Longitude(val_lon, 'deg')\n lats = Latitude(val_lat, 'deg')\n\n for source in tevcat:\n lon = Longitude(source['coord_gal_lon'], 'deg')\n lat = 
Latitude(source['coord_gal_lat'], 'deg')\n x = Angle(source['size_x'], 'deg')\n y = Angle(source['size_y'], 'deg')\n if np.isnan(x) and np.isnan(y):\n rad = Angle('0.3 deg')\n else:\n rad = x if x > y else y\n\n mask = lon_lat_circle_mask(lons, lats, lon, lat, rad)\n all_sky_exclusion.data[mask] = 0\n\n return all_sky_exclusion", "def restrict_non_nbrs_from_repacking(pose, res, task, pack_radius):\n\n center = pose.residue( res ).xyz( pose.residue( res ).nbr_atom() )\n print( \"Res: pack radius: \"+repr(pack_radius) )\n for i in range(1, pose.total_residue() + 1):\n # only pack the mutating residue and any within the pack_radius\n if i == res: continue\n\n nbr = pose.residue( i ).xyz( pose.residue( i ).nbr_atom() )\n dist = nbr.distance(center)\n if dist > pack_radius:\n task.nonconst_residue_task(i).prevent_repacking()\n else:\n task.nonconst_residue_task(i).restrict_to_repacking()\n\n #print task\n return task", "def objective_constraints(self, variables, mask, load, generation, reservations=None):\n constraint_list = []\n constraint_list += [cvx.NonPos(-variables['regu_c'])]\n constraint_list += [cvx.NonPos(-variables['regd_c'])]\n constraint_list += [cvx.NonPos(-variables['regu_d'])]\n constraint_list += [cvx.NonPos(-variables['regd_d'])]\n # p = opt_vars['dis'] - opt_vars['ch']\n # constraint_list += [cvx.NonPos(opt_vars['regd_d'] - cvx.pos(p))]\n # constraint_list += [cvx.NonPos(opt_vars['regu_c'] - cvx.neg(p))]\n if self.combined_market:\n constraint_list += [cvx.Zero(variables['regd_d'] + variables['regd_c'] - variables['regu_d'] - variables['regu_c'])]\n\n return constraint_list", "def get_model_parameter_bounds():\n minf = float(\"-inf\")\n inf = float(\"inf\")\n params = dict(mu=(minf,inf), rho=(0.0 ,inf))\n return params", "def _autobounds(self):\n bounds = {}\n\n def check(prop, compare, extreme, val):\n opp = min if compare is max else max\n bounds.setdefault(prop, val)\n bounds[prop] = opp(compare(bounds[prop], val), extreme)\n\n def bound_check(lat_lon):\n lat, lon = lat_lon\n check('max_lat', max, 90, lat)\n check('min_lat', min, -90, lat)\n check('max_lon', max, 180, lon)\n check('min_lon', min, -180, lon)\n\n lat_lons = [lat_lon for feature in self._features.values() for\n lat_lon in feature.lat_lons]\n if not lat_lons:\n lat_lons.append(self._default_lat_lon)\n for lat_lon in lat_lons:\n bound_check(lat_lon)\n\n return bounds", "def _qt_radius_clustering_minimal(self, min_to_cluster, reduced, unassigned_orphans, cache, max_cycles):\n # Separating components and removing dominated indices reduced runtime on tbpb82 0.4@100% from 10s to 10ms.\n # Before removing dominated, tree_275 0.04@100% found a solution with score 4.0485 after 228k cycles. After, found it in 49k. After adding the second Counter to CoverManager, found it under 1k cycles. 
Each cycle was substantially slower, but the solution still was found ~1000x faster (ms instead of 20 min).\n out_of_range = reduced.copy()\n out_of_range[out_of_range != 0] = 1\n neighbors_of = {}\n for ind in self._not_ignored_inds:\n clstr_inds = np.nonzero(reduced[:,ind] == 0)[0]\n neighbors_of[ind] = set(clstr_inds)\n chsn_indices = set(self.index[name] for name in self.chosen)\n avail_indices = set(self.index[name] for name in self.available)\n num_not_ignored = len(self._not_ignored_inds)\n considered_nbrs, dominated_inds = self._remove_dominated_inds(neighbors_of, chsn_indices, avail_indices, out_of_range)\n # # Process depending on the run parameters\n cache['cycles_used'] = 0\n final_centre_inds, final_scores = [], []\n if min_to_cluster == num_not_ignored: # Critical percent equivalent to 100%\n # Can dramatically speed up the search by separating components\n component_inds = self._identify_components(neighbors_of)\n subset_cycles, cycle_rollover = None, 0\n for subset_indices in component_inds:\n subset_to_cluster = len(subset_indices)\n subset_chosen = chsn_indices & subset_indices\n subset_avail = avail_indices & subset_indices\n if max_cycles != None:\n subset_cycles = ceil(subset_to_cluster/float(min_to_cluster) * max_cycles) + cycle_rollover\n subset_centre_inds, subset_scores, subset_cycles_used = self._qt_radius_cluster_subset(subset_indices, subset_chosen, subset_avail, considered_nbrs, dominated_inds, subset_to_cluster, cache, subset_cycles, out_of_range)\n if subset_cycles_used == None or subset_cycles_used >= subset_cycles:\n cycle_rollover = 0\n else:\n cycle_rollover = subset_cycles - subset_cycles_used\n final_centre_inds.extend(subset_centre_inds)\n final_scores.extend(subset_scores)\n elif min_to_cluster == num_not_ignored - len(unassigned_orphans):\n # Can still use the component speedup in this case\n orphan_inds = set(unassigned_orphans)\n component_inds = self._identify_components(neighbors_of)\n subset_cycles, cycle_rollover = None, 0\n for subset_indices in component_inds:\n if max_cycles != None:\n subset_cycles = ceil(len(subset_indices)/float(min_to_cluster) * max_cycles) + cycle_rollover\n subset_to_cluster = len(subset_indices - orphan_inds)\n if subset_to_cluster == 0: # The entire subset is orphaned, so no centers can be found\n if max_cycles != None:\n cycle_rollover += subset_cycles\n continue\n subset_chosen = chsn_indices & subset_indices\n subset_avail = avail_indices & subset_indices\n subset_centre_inds, subset_scores, subset_cycles_used = self._qt_radius_cluster_subset(subset_indices, subset_chosen, subset_avail, considered_nbrs, dominated_inds, subset_to_cluster, cache, subset_cycles, out_of_range)\n if subset_cycles_used == None or subset_cycles_used >= subset_cycles:\n cycle_rollover = 0\n else:\n cycle_rollover = subset_cycles - subset_cycles_used\n final_centre_inds.extend(subset_centre_inds)\n final_scores.extend(subset_scores)\n else:\n # Can't split into components and guarantee optimal, as I can't predict which component should be allowed to miss some variants.\n # May be a way to remove some components from consideration, but likely requires running _qt_radius_cluster_subset() multiple times. May still be faster, so worth considering if more speed is actually useful here.\n # - All unassigned orphans are part of total_allowed_missed by definition. 
So all other clusters are only allowed to miss allowed_missed = total_allowed_missed - len(unassigned_orphans).\n # - The global optimal solution for some component is guaranteed to fall between the solution for that component finding 100% of variants, and the solution for that component finding len(component)-allowed_missed variants. If they are equal, that's the global optimal solution for that component, and it can be excluded from the combined run. If they're unequal, it was a waste of time and the component has to be included in the combined run.\n final_centre_inds, final_scores, _cycles_used = self._qt_radius_cluster_subset(set(neighbors_of.keys()), chsn_indices, avail_indices, considered_nbrs, dominated_inds, min_to_cluster, cache, max_cycles, out_of_range)\n alt_variants = []\n return final_centre_inds, final_scores, alt_variants", "def sector_mask(shape,centre,radius,angle_range):\n\n x,y = np.ogrid[:shape[0],:shape[1]]\n cx,cy = centre\n tmin,tmax = np.deg2rad(angle_range)\n\n # ensure stop angle > start angle\n if tmax < tmin:\n tmax += 2*np.pi\n\n # convert cartesian --> polar coordinates\n r2 = (x-cx)*(x-cx) + (y-cy)*(y-cy)\n theta = np.arctan2(x-cx,y-cy) - tmin\n\n # wrap angles between 0 and 2*pi\n theta %= (2*np.pi)\n\n # circular mask\n circmask = r2 <= radius*radius\n\n # angular mask\n anglemask = theta <= (tmax-tmin)\n\n return circmask*anglemask", "def sector_mask(shape,centre,radius,angle_range):\n\n x,y = np.ogrid[:shape[0],:shape[1]]\n cx,cy = centre\n tmin,tmax = np.deg2rad(angle_range)\n\n # ensure stop angle > start angle\n if tmax < tmin:\n tmax += 2*np.pi\n\n # convert cartesian --> polar coordinates\n r2 = (x-cx)*(x-cx) + (y-cy)*(y-cy)\n theta = np.arctan2(x-cx,y-cy) - tmin\n\n # wrap angles between 0 and 2*pi\n theta %= (2*np.pi)\n\n # circular mask\n circmask = r2 <= radius*radius\n\n # angular mask\n anglemask = theta <= (tmax-tmin)\n\n return circmask*anglemask", "def sector_mask(shape,centre,radius,angle_range):\n\n x,y = np.ogrid[:shape[0],:shape[1]]\n cx,cy = centre\n tmin,tmax = np.deg2rad(angle_range)\n\n # ensure stop angle > start angle\n if tmax < tmin:\n tmax += 2*np.pi\n\n # convert cartesian --> polar coordinates\n r2 = (x-cx)*(x-cx) + (y-cy)*(y-cy)\n theta = np.arctan2(x-cx,y-cy) - tmin\n\n # wrap angles between 0 and 2*pi\n theta %= (2*np.pi)\n\n # circular mask\n circmask = r2 <= radius*radius\n\n # angular mask\n anglemask = theta <= (tmax-tmin)\n\n return circmask*anglemask", "def rigid_body_constraints(self):\n phi_1 = constant_distance(self.r_i.symbolic_coordinates -\n self.r_j.symbolic_coordinates, self.length)\n phi_2 = constant_distance(self.u.symbolic_coordinates, 1)\n phi_3 = constant_distance(self.v.symbolic_coordinates, 1)\n phi_4 = perpendicular(self.u.symbolic_coordinates,\n self.v.symbolic_coordinates)\n phi_5 = perpendicular(self.r_i.symbolic_coordinates -\n self.r_j.symbolic_coordinates,\n self.u.symbolic_coordinates)\n phi_6 = perpendicular(self.r_i.symbolic_coordinates -\n self.r_j.symbolic_coordinates,\n self.v.symbolic_coordinates)\n\n self.constraints = [phi_1, phi_2, phi_3, phi_4, phi_5, phi_6]", "def __init__(self, shape: Tuple[int, int], spacing: float, asymmetric_grid: bool):\n cols, rows = shape\n super().__init__(\n CalibrationTargetType.CircleGrid,\n rows,\n cols,\n spacing=spacing,\n asymmetric_grid=asymmetric_grid,\n )", "def __init__(self, name: str, radius: float):\r\n\r\n Shape.__init__(self, name)\r\n self.__radius = radius\r\n self.validation()", "def cluster_centres_ska_v5(r_min=None, r_max=None):\n # 
Spiral parameters for inner and outer regions.\n num_arms = 3\n num_per_arm = 5\n start_inner = 417.82\n end_inner = 1572.13\n b_inner = 0.513\n theta0_inner = -48\n start_outer = 2146.78\n end_outer = 6370.13\n b_outer = 0.52\n theta0_outer = 135\n x_inner, y_inner = TelescopeLayout.symmetric_log_spiral(\n num_per_arm, start_inner, end_inner, b_inner, num_arms,\n theta0_inner)\n x_outer, y_outer = TelescopeLayout.symmetric_log_spiral(\n num_per_arm, start_outer, end_outer, b_outer, num_arms,\n theta0_outer)\n x = np.concatenate((x_inner, x_outer))\n y = np.concatenate((y_inner, y_outer))\n r = (x**2 + y**2)**0.5\n arm_index = [i // num_per_arm for i in range(num_per_arm * num_arms)]\n arm_index = np.hstack((arm_index, arm_index))\n\n # Sort by radius and remove the 3 innermost stations.\n idx = r.argsort()\n x = x[idx]\n y = y[idx]\n r = r[idx]\n arm_index = arm_index[idx]\n x, y, r, arm_index = (x[3:], y[3:], r[3:], arm_index[3:])\n\n if r_min and r_max:\n idx = np.where(np.logical_and(r >= r_min, r <= r_max))\n x, y, arm_index = x[idx], y[idx], arm_index[idx]\n elif r_min:\n idx = np.where(r >= r_min)\n x, y, arm_index = x[idx], y[idx], arm_index[idx]\n elif r_max:\n idx = np.where(r <= r_max)\n x, y, arm_index = x[idx], y[idx], arm_index[idx]\n return x, y, arm_index", "def minimum_spanning_arborescence(sol):", "def get_suffstat_bounds():\n minf = float(\"-inf\")\n inf = float(\"inf\")\n params = dict(sum_x=(minf,inf), sum_x_squared=(0.0 ,inf))\n return params", "def RPS_stripping_radius(self, alpha=1.0): \n \n # loops over RPS condition to find where it is zero ... first checks RPS condition at zero\n RPS_condition_center = self.halo_density(0.0)*self.galaxy_velocity(0.0)**2 -\\\n alpha * (cgs.G * (self.ic['M_DM'] + self.ic['M_HI']) * self.ic['n_o']*cgs.mp*self.ic['mu_dwarf'])/ self.ic['r_HI']\n \n #_RPS_condition(0.5*cgs.pc, self.DM_profile, self.gas_profile, self.halo_density(0.0),\n # self.galaxy_velocity(0.0), alpha=alpha)\n RPS_condition_edge = _RPS_condition(self.ic['r_HI'], self.DM_profile, self.gas_profile, self.halo_density(0.0),\n self.galaxy_velocity(0.0), alpha=alpha)\n \n if RPS_condition_center >= 0.0:\n \n predicted_R_strip = 0.0 ; predicted_M_final = 0.0\n \n elif RPS_condition_edge <= 0.0:\n \n predicted_R_strip = self.ic['r_HI'] ; predicted_M_final = self.ic['M_HI']\n \n else: # it is somewhere in between... solve\n \n \n eq_to_solve = lambda x : _RPS_condition( x, self.DM_profile, self.gas_profile, self.halo_density(0.0),\n self.galaxy_velocity(0.0), alpha = alpha)\n \n \n # there is a chance that the naive r values to look between both have the same sign\n f_a = 0.25*cgs.pc ; f_b = self.ic['r_HI']\n \n if np.sign(eq_to_solve(f_a)) == np.sign(eq_to_solve(f_b)):\n # sample at many radii and help out the root solver\n r_sample = np.linspace(0.1 * cgs.pc, self.ic['r_HI'], 1000.0)\n dr = r_sample[1] - r_sample[0]\n RPS_condition_sample = eq_to_solve(r_sample)\n \n l = np.sign(RPS_condition_sample[:-1])\n r = np.sign(RPS_condition_sample[1: ])\n \n sign_sum = l + r # either 2, 0, or -2 ... 
want the zeros\n r_near_root = r_sample[sign_sum == 0]\n\n if np.size(r_near_root) > 1:\n r_near_root = r_near_root[-1]\n else:\n r_near_root = float(r_near_root)\n \n f_a = r_near_root - 2.0* dr ; f_b = r_near_root + 2.0*dr\n \n predicted_R_strip = brentq(eq_to_solve, f_a, f_b)\n else:\n predicted_R_strip = brentq(eq_to_solve, f_a, f_b)\n \n # now calcualte the mass remaining within R_strip\n gas_integrand = lambda x: x*x*self.gas_profile(x)\n predicted_M_final = 4.0*np.pi* integrate.quad(gas_integrand, 0.0, predicted_R_strip)[0]\n\n \n \n \n self.predicted_R_strip = predicted_R_strip\n self.predicted_M_final = predicted_M_final\n \n \n return self.predicted_R_strip, self.predicted_M_final", "def clip_grads(self): # Clipping gradients for stability\n parameters = list(filter(lambda p: p.grad is not None, self.machine.parameters()))\n for p in parameters:\n p.grad.data.clamp_(-10, 10)", "def _reduce_distances(self, threshold):\n reduced = self.orig_dists.copy()\n reduced[reduced <= threshold] = 0\n # Remove ignored from all consideration\n ignrd_indices = [self.index[name] for name in self.ignored]\n if ignrd_indices:\n reduced[:,ignrd_indices] = np.inf\n reduced[ignrd_indices,:] = np.inf\n # Check if the given parameters are feasible\n chsn_indices = set(self.index[name] for name in self.chosen)\n avail_indices = set(self.index[name] for name in self.available)\n ca_indices = chsn_indices | avail_indices\n unassigned_indices = np.array(list(self._not_ignored_inds - ca_indices))\n if len(unassigned_indices) == 0:\n unassigned_orphans = unassigned_indices\n else:\n ca_indices = list(ca_indices)\n avail_in_range = np.count_nonzero(reduced[np.ix_(unassigned_indices,ca_indices)] == 0, axis=1)\n unassigned_orphans = unassigned_indices[avail_in_range == 0]\n return reduced, unassigned_orphans", "def roundConstantRadius(*args, append: bool=False, constructionHistory: bool=True, name:\n AnyStr=\"\", object: bool=True, radiuss: Union[float, List[float]]=0.0,\n side: Union[List[AnyStr, int], List[List[AnyStr, int]]]=None, sidea:\n Union[int, List[int]]=0, sideb: Union[int, List[int]]=0, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass", "def get_hyperparameter_bounds():\n minf = float(\"-inf\")\n inf = float(\"inf\")\n params = dict(mu=(minf,inf), nu=(0.0 ,inf), r=(0.0, inf), s=(0.0, inf))\n return params", "def get_constraints(self):\n return ({'type': 'ineq', 'fun': lambda x: x[1] - x[2]},\n {'type': 'ineq', 'fun': lambda x: x[3] - x[4]})", "def action_space(self):\n lower_bounds = np.array([])\n upper_bounds = np.array([])\n for joint in self._used_joints:\n joint_idx = self._joint_limits.joint_names.index(joint)\n if self._control_mode == 'position':\n lower_bounds = np.concatenate(\n (lower_bounds,\n np.array(self._joint_limits.position_lower[\n joint_idx:joint_idx + 1])))\n upper_bounds = np.concatenate(\n (upper_bounds,\n np.array(self._joint_limits.position_upper[\n joint_idx:joint_idx + 1])))\n elif self._control_mode == 'velocity':\n velocity_limit = np.array(\n self._joint_limits.velocity[joint_idx:joint_idx + 1]) * 0.1\n lower_bounds = np.concatenate((lower_bounds, -velocity_limit))\n upper_bounds = np.concatenate((upper_bounds, velocity_limit))\n elif self._control_mode == 'effort':\n effort_limit = np.array(\n self._joint_limits.effort[joint_idx:joint_idx + 1])\n lower_bounds = np.concatenate((lower_bounds, -effort_limit))\n upper_bounds = np.concatenate((upper_bounds, effort_limit))\n else:\n raise ValueError(\n 'Control mode %s is not known!' 
% self._control_mode)\n return gym.spaces.Box(\n np.concatenate((lower_bounds, np.array([0]))),\n np.concatenate((upper_bounds, np.array([100]))),\n dtype=np.float32)", "def _add_bound_configs(CONFIG):\n CONFIG.declare(\n 'obj_bound',\n ConfigValue(\n default=1e15,\n domain=PositiveFloat,\n description='Bound applied to the linearization of the objective function if main MIP is unbounded.',\n ),\n )\n CONFIG.declare(\n 'continuous_var_bound',\n ConfigValue(\n default=1e10,\n description='Default bound added to unbounded continuous variables in nonlinear constraint if single tree is activated.',\n domain=PositiveFloat,\n ),\n )\n CONFIG.declare(\n 'integer_var_bound',\n ConfigValue(\n default=1e9,\n description='Default bound added to unbounded integral variables in nonlinear constraint if single tree is activated.',\n domain=PositiveFloat,\n ),\n )\n CONFIG.declare(\n 'initial_bound_coef',\n ConfigValue(\n default=1e-1,\n domain=PositiveFloat,\n description='The coefficient used to approximate the initial primal/dual bound.',\n ),\n )", "def initialize_radius(src):\n\n x_length = (np.amax(src[:, 0]) - np.amin(src[:, 0]))\n y_length = (np.amax(src[:, 1]) - np.amin(src[:, 1]))\n z_length = (np.amax(src[:, 2]) - np.amin(src[:, 2]))\n\n max_length = max(x_length, y_length, z_length)\n\n if max_length > 50:\n radius = 10\n elif max_length > 1:\n radius = 1\n else:\n radius = 0.01\n\n return radius" ]
[ "0.57216144", "0.5593304", "0.5487926", "0.5459716", "0.5433389", "0.5411653", "0.5399157", "0.529492", "0.5289383", "0.528124", "0.52704936", "0.52636516", "0.5226623", "0.52207905", "0.52052534", "0.51941854", "0.51933926", "0.5185518", "0.51653844", "0.51490223", "0.5141201", "0.5120843", "0.51196635", "0.5117338", "0.5112763", "0.5112375", "0.51065964", "0.50865054", "0.5068361", "0.50505036", "0.5048991", "0.5038256", "0.5032162", "0.5032155", "0.50315124", "0.5015905", "0.5009207", "0.5007167", "0.49993807", "0.49952322", "0.4992828", "0.49734852", "0.49522004", "0.4938856", "0.49258325", "0.4909721", "0.49067596", "0.49038136", "0.49019867", "0.48999244", "0.48964646", "0.48926702", "0.4888923", "0.48868442", "0.48747948", "0.48698467", "0.48690858", "0.48673755", "0.48653865", "0.48650444", "0.48608288", "0.48585254", "0.485739", "0.48533547", "0.48476428", "0.48450464", "0.48448792", "0.48345447", "0.48305097", "0.48295182", "0.48251665", "0.48193625", "0.48075235", "0.4807234", "0.48063254", "0.47996068", "0.47995558", "0.47968218", "0.47949976", "0.4792386", "0.4790972", "0.47769356", "0.47712332", "0.47712332", "0.47712332", "0.47691146", "0.47690767", "0.47654086", "0.47636774", "0.47561783", "0.47554177", "0.47457418", "0.4737157", "0.4735138", "0.47348204", "0.47326124", "0.47309753", "0.47307235", "0.4721343", "0.47198337" ]
0.6335594
0
Returns true if provenance of property is SPE or AST
def use_property(kepid, prop): try: prov = kicu.DATA.ix[kepid, '{}_prov'.format(prop)] return any([prov.startswith(s) for s in ['SPE', 'AST']]) except KeyError: raise MissingStellarError('{} not in stellar table?'.format(kepid))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isprop(v):\n return isinstance(v, property)", "def isproperty(object):\n return isinstance(object, property)", "def is_simple(self):\n return self.propertyValueType.lower() in ('float', 'double',\n 'int', 'integer',\n 'string')", "def isSemantics(self):\n return _libsbml.ASTNode_isSemantics(self)", "def is_psionic(self) -> bool:\n return ATTRIBUTE.Psionic.value in self.type_data.attributes", "def species_has_sp(species_output_dict: dict) -> bool:\n if species_output_dict['paths']['sp'] or species_output_dict['paths']['composite']:\n return True\n return False", "def is_vespene_geyser(self) -> bool:\n return self.type_data.has_vespene", "def is_prop_symbol(s):\n return is_symbol(s) and s[0].isupper() and s != 'TRUE' and s != 'FALSE'", "def can_prove(self, target):\n return self.prop == target.prop and set(self.hyps).issubset(set(target.hyps))", "def check_thm_type(self):\n for t in list(self.hyps) + [self.prop]:\n if t.checked_get_type() != BoolType:\n raise term.TypeCheckException('expect boolean type for propositions')", "def has_expression(self):\n return self._expression is not None", "def is_procedure(vba_object):\n if hasattr(vba_object, 'statements'):\n return True\n else:\n return False", "def test_should_return_appropriate_type(self):\r\n assert isinstance(self.spec_parser.parse_statement(self.edge_spec), Edge)\r\n assert isinstance(self.spec_parser.parse_statement(self.property_spec), Property)", "def is_plotable(self):\n return self.propertyValueType.lower() in ('float', 'double',\n 'int', 'integer')", "def isComputed(self) -> bool:\n ...", "def is_expression(self):\r\n return conf.lib.clang_isExpression(self)", "def is_apt(self):\r\n return self.has_label('apt')", "def is_proved(self):\n return len(self.proofs) > 0", "def is_variant(self):\n return bool(self.gt_type)", "def __eq__(self, obj: \"Property\") -> bool:\n return self.name == obj.name and self.property_type == obj.property_type", "def process_property(self, prop):\n NifLog.warn(f\"Unknown property block found : {prop.name}\")\n NifLog.warn(f\"This type isn't currently supported: {type(prop)}\")", "def has_path_source(self) -> bool:\n\n return any(self.is_path_type(x) for x in self.parameters)", "def is_P(self):\n return isinstance(self,P)", "def is_equals(self):\n return self.prop.is_equals()", "def match(self, proof: dict) -> bool:\n return proof.get(\"proofPurpose\") == self.term", "def _is_simple_type(cls):\n return all([\n AnnotationWrapper(anno).is_simple_in_opt_and_not_opt\n for anno in cls._used_annotations()\n ])", "def hasVeryTrustedValue(self):\n return self.subnode_source.hasVeryTrustedValue()", "def is_indexed_or_named_property_operation(self):\n return self.is_getter or self.is_setter or self.is_deleter", "def parsable_as_expression(self):\n return self.parsable and self.expression_ast_node is not None", "def promoter(self):\n return self.mut['ProMutation'] is None", "def is_preprocessing(self):\r\n return conf.lib.clang_isPreprocessing(self)", "def parsable(self):\n return isinstance(self._ast_node_or_parse_exception, ast.AST)", "def __bool__(self):\n return (self.value == POS)", "def determine_if_source(self):\n # titles ending in a parenthetical (usually with date) are generally\n # sources.\n p = re.compile(r'.*\\(.*\\)')\n m = p.match(self.title)\n if self.title in ['Quotes', 'Sourced']:\n self.is_source = False\n return\n # otherwise, sections that have no children, and where most quotes\n # don't appear to have a source, are usually sources\n if m and m.group() == self.title:\n self.is_source 
= True\n return\n quotes_lack_source = False\n n_quotes_with_source = sum(\n map(lambda x: x.potential_source is not None, self.quotes))\n n_quotes = len(self.quotes)\n if n_quotes > 0 and n_quotes_with_source / n_quotes < .5:\n quotes_lack_source = True\n has_children = len(self.children) > 0\n if quotes_lack_source and not has_children:\n self.is_source = True", "def _is_valid_pt(content_type: str) -> bool:\n content_type = content_type.strip()\n return content_type in SPECIFICATION_PRIMITIVE_TYPES", "def is_coding(self):\n return self.protein_seq is not None", "def is_mortgaged(self, property_name):\n is_mortgaged = self.db.read_value(property_name, \"is_mortgaged\")\n if is_mortgaged == \"yes\":\n return True\n else:\n return False", "def is_equivalence(self) -> bool:", "def _is_propertyable(\n names, # type: List[str]\n attrs, # type: Dict[str, Any]\n annotations, # type: Dict[str, type]\n attr, # Dict[str, Any]\n):\n # type: (...) -> bool\n return (\n attr in annotations\n and not attr.startswith(\"_\")\n and not attr.isupper()\n and \"__{}\".format(attr) not in names\n and not isinstance(getattr(attrs, attr, None), types.MethodType)\n )", "def is_on(self) -> bool:\n return self.entity_description.state_fn(self._valve)", "def has_indel(variant):\n _validate_str(variant)\n if variant == WILD_TYPE_VARIANT:\n return False\n elif variant == SYNONYMOUS_VARIANT:\n return False\n else:\n return any(x in variant for x in (\"ins\", \"del\", \"dup\"))", "def _is_cop(target: Any) -> bool:\n\n if isinstance(target, str):\n return target in common_ops\n else:\n return target.__name__ in common_ops", "def test_proper(self):\n\n self.assertTrue(self.cs.isProper)\n self.assertFalse(self.cs.isDegenerate)", "def _is_ref_prop(name):\n return name.endswith(\"_ref\") or name.endswith(\"_refs\")", "def has_vespene(self) -> bool:\n return bool(self.proto.vespene_contents)", "def is_assumption(n):\n if not isinstance(n, types.Symbol):\n return False\n symbol = self.symbol_table.lookup(n.name)\n if symbol and symbol.type_ == SymbolType.StackItem:\n return True\n return False", "def check_property(self, descriptor): # pylint: disable=unused-argument\r\n raise SkipTest(\"check_property not defined\")", "def is_sed(self) -> bool:\n return False", "def check_property(self, descriptor):\r\n self.assertEqual(descriptor.get_html(), descriptor.render('studio_view').content)", "def HasPerInstancePropertyProviders(self) -> bool:", "def is_ppt(filename):\n have_current_user = False\n have_user_edit = False\n have_persist_dir = False\n have_document_container = False\n ppt_file = None\n try:\n ppt_file = PptFile(filename)\n for stream in ppt_file.iter_streams():\n if stream.name == 'Current User':\n for record in stream.iter_records():\n if isinstance(record, PptRecordCurrentUser):\n have_current_user = True\n if have_current_user and have_user_edit and \\\n have_persist_dir and have_document_container:\n return True\n elif stream.name == 'PowerPoint Document':\n for record in stream.iter_records():\n if record.type == 0x0ff5: # UserEditAtom\n have_user_edit = True\n elif record.type == 0x1772: # PersistDirectoryAtom\n have_persist_dir = True\n elif record.type == 0x03e8: # DocumentContainer\n have_document_container = True\n else:\n continue\n if have_current_user and have_user_edit and \\\n have_persist_dir and have_document_container:\n return True\n else: # ignore other streams/storages since they are optional\n continue\n except Exception as exc:\n logging.debug('Ignoring exception in is_ppt, assume is not ppt',\n 
exc_info=True)\n finally:\n if ppt_file is not None:\n ppt_file.close()\n return False", "def parseProperties(self) -> bool:\n\n # vNetIds is not a mandatory property. This property can be used if the resources are distributed across multiple vNets.\n self.vNetIds = self.providerProperties.get(\"vNetIds\", None)\n\n # enabledProviders contains the provider types for which AIOps is enabled. Mandatory property.\n self.enabledProviders = self.providerProperties.get(\n \"enabledProviders\", None)\n if not self.enabledProviders:\n self.tracer.error(\n \"[%s] enabledProviders cannot be empty in the AIOps config.\" % self.fullName)\n return False\n return True", "def matches_property_name(fun):\n return callable(fun) and getattr(fun, annotation, None) == value", "def is_expected_for_this_test(obj):\n if obj['test-name'] != test_name:\n return False\n if not fnmatch.fnmatch(config_filename, obj['configuration-filename']):\n return False\n expected_variant = obj.get('variant', None)\n if expected_variant == \"*\":\n return True\n for k in expected_variant:\n if not k in variant:\n return False\n if expected_variant[k] != variant[k]:\n return False\n return True", "def isprogram(self):\n return True", "def is_procedure(procedure):\n return isa(procedure,Procedure) or isa(procedure,type(max)) or isa(procedure,type(tostr))", "def isProduction(obj):\n return 'PROD' in obj.tags # This is deprecated? obj.tags.has_key('PROD')", "def is_verb_concept(self):\n return self._concept_type is not None and self._concept_type.find('verb') != -1", "def is_field(self, proof = True):\n return True", "def checkPropertyCompliance(soup, PropertyName, PropertyItem, decoded, refs):\n resultList = OrderedDict()\n counts = Counter()\n\n rsvLogger.info(PropertyName)\n item = PropertyName.split(':')[-1]\n\n propValue = decoded.get(item, 'n/a')\n rsvLogger.info(\"\\tvalue: %s %s\", propValue, type(propValue))\n\n propAttr = PropertyItem['attrs']\n\n propType = propAttr.get('type')\n propRealType = PropertyItem.get('realtype')\n rsvLogger.info(\"\\thas Type: %s %s\", propType, propRealType)\n\n propExists = not (propValue == 'n/a')\n propNotNull = propExists and propValue is not None and propValue is not 'None'\n\n # why not actually check oem\n # rs-assertion: 7.4.7.2\n if 'Oem' in PropertyName:\n rsvLogger.info('\\tOem is skipped')\n counts['skipOem'] += 1\n return {item: ('-', '-',\n 'Exists' if propExists else 'DNE', 'SkipOEM')}, counts\n\n propMandatory = False\n propMandatoryPass = True\n if 'Redfish.Required' in PropertyItem:\n propMandatory = True\n propMandatoryPass = True if propExists else False\n rsvLogger.info(\"\\tMandatory Test: %s\",\n 'OK' if propMandatoryPass else 'FAIL')\n else:\n rsvLogger.info(\"\\tis Optional\")\n if not propExists:\n rsvLogger.info(\"\\tprop Does not exist, skip...\")\n counts['skipOptional'] += 1\n return {item: ('-', (propType, propRealType),\n 'Exists' if propExists else 'DNE',\n 'SkipOptional')}, counts\n\n propNullable = propAttr.get('nullable')\n propNullablePass = True\n if propNullable is not None:\n propNullablePass = (\n propNullable == 'true') or not propExists or propNotNull\n rsvLogger.info(\"\\tis Nullable: %s %s\", propNullable, propNotNull)\n rsvLogger.info(\"\\tNullability test: %s\",\n 'OK' if propNullablePass else 'FAIL')\n\n # rs-assertion: Check for permission change\n propPermissions = propAttr.get('Odata.Permissions')\n if propPermissions is not None:\n propPermissionsValue = propPermissions['enummember']\n rsvLogger.info(\"\\tpermission %s\", 
propPermissionsValue)\n\n validPatternAttr = PropertyItem.get(\n 'Validation.Pattern')\n validMinAttr = PropertyItem.get('Validation.Minimum')\n validMaxAttr = PropertyItem.get('Validation.Maximum')\n\n paramPass = True\n\n # Note: consider http://docs.oasis-open.org/odata/odata-csdl-xml/v4.01/csprd01/odata-csdl-xml-v4.01-csprd01.html#_Toc472333112\n # Note: make sure it checks each one\n propCollectionType = PropertyItem.get('isCollection')\n isCollection = propCollectionType is not None\n if propCollectionType is not None and propNotNull:\n # note: handle collections correctly, this needs a nicer printout\n # rs-assumption: do not assume URIs for collections\n # rs-assumption: check @odata.count property\n # rs-assumption: check @odata.link property\n rsvLogger.info(\"\\tis Collection\")\n resultList[item] = ('Collection, size: ' + str(len(propValue)), (propType, propRealType),\n 'Exists' if propExists else 'DNE',\n '...')\n propValueList = propValue\n else:\n propValueList = [propValue]\n # note: make sure we don't enter this on null values, some of which are\n # OK!\n for cnt, val in enumerate(propValueList):\n appendStr = (('#' + str(cnt)) if isCollection else '')\n if propRealType is not None and propExists and propNotNull:\n paramPass = False\n if propRealType == 'Edm.Boolean':\n paramPass = isinstance( val, bool )\n if not paramPass:\n rsvLogger.error(\"%s: Not a boolean\" % PropertyName)\n\n elif propRealType == 'Edm.DateTimeOffset':\n # note: find out why this might be wrong \n if isinstance(val, str):\n match = re.match(\n '.*(Z|(\\+|-)[0-9][0-9]:[0-9][0-9])', str(val))\n paramPass = match is not None\n if not paramPass:\n rsvLogger.error(\"%s: Malformed DateTimeOffset\" % PropertyName)\n else:\n rsvLogger.error(\"%s: Expected string value for DateTimeOffset\" % PropertyName)\n\n\n elif propRealType == 'Edm.Int16' or propRealType == 'Edm.Int32' or\\\n propRealType == 'Edm.Int64' or propRealType == 'Edm.Int' or\\\n propRealType == 'Edm.Decimal' or propRealType == 'Edm.Double':\n rsvLogger.debug(\"intcheck: %s %s %s\", propRealType, val, (validMinAttr, validMaxAttr))\n paramPass = isinstance( val, (int, float) ) \n if paramPass:\n if 'Int' in propRealType:\n paramPass = isinstance( val, int )\n if not paramPass:\n rsvLogger.error(\"%s: Expected int\" % PropertyName)\n if validMinAttr is not None:\n paramPass = paramPass and int(\n validMinAttr['int']) <= val\n if not paramPass:\n rsvLogger.error(\"%s: Value out of assigned min range\" % PropertyName)\n if validMaxAttr is not None:\n paramPass = paramPass and int(\n validMaxAttr['int']) >= val\n if not paramPass:\n rsvLogger.error(\"%s: Value out of assigned max range\" % PropertyName)\n else:\n rsvLogger.error(\"%s: Expected numeric type\" % PropertyName)\n\n\n elif propRealType == 'Edm.Guid':\n if isinstance(val, str):\n match = re.match(\n \"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}\", str(val))\n paramPass = match is not None\n if not paramPass:\n rsvLogger.error(\"%s: Malformed Guid\" % PropertyName)\n else:\n rsvLogger.error(\"%s: Expected string value for Guid\" % PropertyName)\n \n elif propRealType == 'Edm.String':\n if isinstance(val, str):\n if validPatternAttr is not None:\n pattern = validPatternAttr.get('string', '')\n match = re.fullmatch(pattern, val)\n paramPass = match is not None\n if not paramPass:\n rsvLogger.error(\"%s: Malformed String\" % PropertyName)\n else:\n paramPass = True\n else:\n rsvLogger.error(\"%s: Expected string value\" % PropertyName)\n\n else:\n if 
propRealType == 'complex':\n rsvLogger.info('\\t***going into Complex')\n if not isinstance( val, dict ):\n resultList[item + appendStr]\\\n = ('ComplexDictionary' + appendStr, (propType, propRealType),\\\n 'Exists' if propExists else 'DNE',\\\n 'complexFAIL')\n rsvLogger.error(item + ' : Complex item not a dictionary')\n counts['complex'] += 1\n counts['failComplex'] += 1\n continue\n \n complexMessages = OrderedDict()\n complexCounts = Counter()\n innerPropDict = PropertyItem['typeprops']\n innerPropSoup = PropertyItem['soup']\n successService, serviceSchemaSoup, SchemaServiceURI = getSchemaDetails('metadata','/redfish/v1/$metadata','.xml')\n if successService:\n serviceRefs = getReferenceDetails(serviceSchemaSoup)\n successService, additionalProps = getAnnotations(serviceSchemaSoup, serviceRefs, val)\n for prop in additionalProps:\n innerPropDict[prop[2]] = getPropertyDetails(*prop)\n for prop in innerPropDict:\n propMessages, propCounts = checkPropertyCompliance(innerPropSoup, prop, innerPropDict[prop], val, refs)\n complexMessages.update(propMessages)\n complexCounts.update(propCounts)\n successPayload, odataMessages = checkPayloadCompliance('',val)\n complexMessages.update(odataMessages)\n rsvLogger.info('\\t***out of Complex')\n rsvLogger.info('complex %s', complexCounts)\n counts.update(complexCounts)\n resultList[item + appendStr]\\\n = ('ComplexDictionary' + appendStr, (propType, propRealType),\\\n 'Exists' if propExists else 'DNE',\\\n 'complex')\n if item == \"Actions\":\n success, baseSoup, baseRefs, baseType = True, innerPropSoup, getReferenceDetails(innerPropSoup), decoded.get('@odata.type')\n actionsDict = dict()\n\n while success:\n SchemaNamespace, SchemaType = getNamespace(baseType), getType(baseType)\n innerschema = baseSoup.find('schema', attrs={'namespace': SchemaNamespace})\n actions = innerschema.find_all('action')\n for act in actions:\n keyname = '#%s.%s' % (SchemaNamespace, act['name'])\n actionsDict[keyname] = act\n success, baseSoup, baseRefs, baseType = getParentType(baseSoup, baseRefs, baseType, 'entitytype')\n \n for k in actionsDict:\n actionDecoded = val.get(k, 'n/a')\n actPass = False\n if actionDecoded != 'n/a':\n target = actionDecoded.get('target')\n if target is not None and isinstance( target, str ):\n actPass = True\n else:\n rsvLogger.error(k + ': target for action is malformed')\n else:\n rsvLogger.error(k + ': action not Found')\n complexMessages[k] = ('Action', '-',\\\n 'Exists' if actionDecoded != 'n/a' else 'DNE',\\\n 'PASS' if actPass else 'FAIL') \n counts['pass'] += 1\n \n for complexKey in complexMessages:\n resultList[item + '.' + complexKey + appendStr] = complexMessages[complexKey]\n\n for key in val:\n if key not in complexMessages:\n rsvLogger.error('%s: Appears to be an extra property (check inheritance or casing?)', item + '.' + key + appendStr)\n counts['failAdditional'] += 1\n resultList[item + '.' 
+ key + appendStr] = (val[key], '-',\n 'Exists',\n '-')\n continue\n\n elif propRealType == 'enum':\n if isinstance(val, str):\n paramPass = val in PropertyItem['typeprops']\n if not paramPass:\n rsvLogger.error(\"%s: Invalid enum found (check casing?)\" % PropertyName)\n else:\n rsvLogger.error(\"%s: Expected string value for Enum\" % PropertyName)\n \n elif propRealType == 'deprecatedEnum':\n if isinstance(val, list):\n paramPass = True\n for enumItem in val:\n for k,v in enumItem.items():\n rsvLogger.debug('%s, %s' % (k,v))\n paramPass = paramPass and str(v) in PropertyItem['typeprops']\n if not paramPass:\n rsvLogger.error(\"%s: Invalid DeprecatedEnum found (check casing?)\" % PropertyName)\n elif isinstance(val, str):\n rsvLogger.debug('%s' % val)\n paramPass = str(val) in PropertyItem['typeprops']\n if not paramPass:\n rsvLogger.error(\"%s: Invalid DeprecatedEnum found (check casing?)\" % PropertyName)\n else:\n rsvLogger.error(\"%s: Expected list/str value for DeprecatedEnum? (\" % PropertyName) \n\n elif propRealType == 'entity':\n # check if the entity is truly what it's supposed to be\n autoExpand = PropertyItem.get('OData.AutoExpand',None) is not None or\\\n PropertyItem.get('OData.AutoExpand'.lower(),None) is not None\n uri = val['@odata.id']\n if not autoExpand:\n success, data, status = callResourceURI(uri)\n else:\n success, data, status = True, val, 200\n rsvLogger.debug('%s, %s, %s', success, (propType, propCollectionType), data)\n if propCollectionType == 'Resource.Item' or propType == 'Resource.Item' and success: \n paramPass = success \n elif success:\n currentType = data.get('@odata.type', propCollectionType)\n if currentType is None:\n currentType = propType\n baseLink = refs.get(getNamespace(propCollectionType if propCollectionType is not None else propType))\n baseLinkObj = refs.get(getNamespace(currentType.split('.')[0]))\n if soup.find('schema',attrs={'namespace': getNamespace(currentType)}) is not None:\n success, baseSoup = True, soup\n elif baseLink is not None:\n success, baseSoup, uri = getSchemaDetails(*baseLink)\n else:\n success = False\n\n rsvLogger.debug('success: %s %s %s',success, currentType, baseLink) \n if currentType is not None and success:\n currentType = currentType.replace('#','')\n baseRefs = getReferenceDetails(baseSoup)\n allTypes = []\n while currentType not in allTypes and success: \n allTypes.append(currentType)\n success, baseSoup, baseRefs, currentType = getParentType(baseSoup, baseRefs, currentType, 'entitytype')\n rsvLogger.debug('success: %s %s',success, currentType)\n\n rsvLogger.debug('%s, %s, %s', propType, propCollectionType, allTypes)\n paramPass = propType in allTypes or propCollectionType in allTypes\n if not paramPass:\n rsvLogger.error(\"%s: Expected Entity type %s, but not found in type inheritance %s\" % (PropertyName, (propType, propCollectionType), allTypes))\n else:\n rsvLogger.error(\"%s: Could not get schema file for Entity check\" % PropertyName)\n else:\n rsvLogger.error(\"%s: Could not get resource for Entity check\" % PropertyName)\n\n\n resultList[item + appendStr] = (val, (propType, propRealType),\n 'Exists' if propExists else 'DNE',\n 'PASS' if paramPass and propMandatoryPass and propNullablePass else 'FAIL')\n if paramPass and propNullablePass and propMandatoryPass:\n counts['pass'] += 1\n rsvLogger.info(\"\\tSuccess\")\n else:\n counts[propType] += 1\n if not paramPass:\n if propMandatory:\n rsvLogger.error(\"%s: Mandatory prop has failed to check\" % PropertyName)\n counts['failMandatoryProp'] += 1\n else:\n 
counts['failProp'] += 1\n elif not propMandatoryPass:\n rsvLogger.error(\"%s: Mandatory prop does not exist\" % PropertyName)\n counts['failMandatoryExist'] += 1\n elif not propNullablePass:\n rsvLogger.error(\"%s: This property is not nullable\" % PropertyName)\n counts['failNull'] += 1\n rsvLogger.info(\"\\tFAIL\")\n\n return resultList, counts", "def find_event_property(properties, value, events):\n for event in events:\n if properties in event and event[properties] == value:\n return True\n return False", "def is_scala(self):\r\n return self.has_label('scala')", "def is_reflexive(self):\n return self.prop.is_reflexive()", "def is_snv(self):\n return len(self.REF) == 1 and all(a.type == \"SNV\" for a in self.ALT)", "def is_on(self):\n return bool(getattr(self.resource, self.variable))", "def is_setup(self):\n if self.pocs is None:\n print_warning('POCS has not been setup. Please run `setup_pocs`')\n return False\n return True", "def testPsychPsychiatric(self):\n attr = self.session.create_visit_attr()\n\n self.util.boolTypeTest(self, attr, \"psychiatric\")\n\n self.util.boolPropertyTest(self, attr, \"psychiatric\")", "def validateProp(filename):\n\n # does the file exists\n if (not os.path.exists(filename)):\n LOG.warning('Prop file (%s) does not exist' % (filename))\n return False\n\n # can I read it\n try:\n propFile = open(filename, 'r')\n prop = json.load(propFile)\n propFile.close()\n except (ValueError, OSError):\n LOG.warning('Prop file (%s) unable to read or did not parse' % (filename))\n return False\n\n # does the prop have the correct value\n for key in ('name', 'md5', 'description', 'size', 'contact'):\n if (key not in prop):\n LOG.warning('Prop file (%s) missing key (%s)' % (filename, key))\n return False\n\n return True", "def isEquivTo(self, details):\r\n return self.getWikiLanguageName() == details.getWikiLanguageName()", "def value(self) -> bool:", "def to_sum_up(prop: 'string') -> 'Boolean':\n\n # strigs to be ignored in tooltip. 
Add new if missing\n ignores = ['Insured', 'durability', 'Weight', 'artifact rarity', 'strength requirement', 'two-handed weapon',\n 'skill required']\n\n for ignore in ignores:\n if prop.find(ignore) != -1 or prop == '':\n break\n else:\n return True\n\n return False", "def isSympy(val):\n properties = dir(val)\n return (\"is_symbol\" in properties) or (\"evalf\" in properties)", "def test_properties_evolution_get(self):\n pass", "def is_pure(self) -> bool:\r\n return self.is_valid and np.all([x[\"operation\"].is_pure for x in self.operations_by_name.values()])", "def has_sclass(self, w: Wrapper, prop: Any) -> bool:\n if not prop:\n return None\n props = self.sclasses(w)\n if isinstance(prop, str):\n ans = [prop in props]\n else:\n ans = [i in props for i in prop]\n return all(ans)", "def spec(self) -> bool:\n\t\treturn True", "def is_ontology_term_ref(v):\n return hasattr(v, \"name\") and hasattr(v, \"ontology_name\") and hasattr(v, \"accession\")", "def is_inequality(self):\n return True", "def is_inequality(self):\n return True", "def is_simple_in_opt(self) -> bool:\n return self.inner_part_of_optional.is_simple", "def isAnalysisRecipe(self):\r\n return True", "def property(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"property\")", "def _is_compound_of(entry: _LexiconEntry) -> str:\n return entry[\"is_compound\"].lower()", "def _should_send_property(self, key, value):\n to_json = self.trait_metadata(key, 'to_json', self._trait_to_json)\n if key in self._property_lock:\n # model_state, buffer_paths, buffers\n split_value = _remove_buffers({ key: to_json(value, self)})\n split_lock = _remove_buffers({ key: self._property_lock[key]})\n # A roundtrip conversion through json in the comparison takes care of\n # idiosyncracies of how python data structures map to json, for example\n # tuples get converted to lists.\n if (jsonloads(jsondumps(split_value[0])) == split_lock[0]\n and split_value[1] == split_lock[1]\n and _buffer_list_equal(split_value[2], split_lock[2])):\n if self._holding_sync:\n self._states_to_send.discard(key)\n return False\n if self._holding_sync:\n self._states_to_send.add(key)\n return False\n else:\n return True", "def is_food(self) -> bool:\n return self in (self.off, self.off_pro)", "def property( self, prop ):\n raise NotImplementedError(\"property\")", "def crit(p):\n return any([\n is_proj(p),\n is_uuid(p),\n is_sqlite(p),\n contains_uuid_folder(p),\n contains_proj_file(p),\n contains_sqlite(p)\n ])", "def in_struct_code(self):\n return self.mscope is None and self.sscope is not None", "def check_property(self, descriptor):\r\n self.assertEqual(\r\n descriptor._xmodule.get_html(),\r\n descriptor.render('student_view').content\r\n )", "def is_shed_tool_conf(self):", "def is_inequality(self): \n return False", "def isFlow(self) -> bool:\n ...", "def isSource(self):\n return (len(self.parents()) == 0)", "def _IsAngstroem(units):\n if isinstance(units, Atom):\n check = units.store.get(\"units\")\n if not check:\n return False\n else:\n check = units\n return check == \"angstroem\" or check == \"angstroemd0\"", "def is_synthetic(self):\r\n return self.has_label('synthetic')", "def is_simple(self): # -> bool:\n ...", "def __checkPropBonus(self, track):\n result = False\n if self.battle.getInteractivePropTrackBonus() == track:\n result = True\n return result", "def testTypeSingle(self):\n prop = make_prop(kind=bool)\n with self.assertRaises(TypeError):\n prop.interpret(1, {})\n\n self.assertEqual(True, prop.interpret(True, {}))", "def 
test_spires_syntax_detected_invenio(self):\n # trac #261\n converter = search_engine_query_parser.SpiresToInvenioSyntaxConverter()\n inv_search = converter.is_applicable(\"t:p a:c\")\n self.assertEqual(inv_search, False)", "def is_simple(self):\n return _property_op(arctern.ST_IsSimple, self).astype(bool, copy=False)" ]
[ "0.6570999", "0.6159433", "0.562184", "0.5580678", "0.55424774", "0.54955554", "0.54840237", "0.5463147", "0.5448263", "0.53668535", "0.5354428", "0.53308886", "0.5306961", "0.53069216", "0.5291205", "0.52911955", "0.52770704", "0.52570504", "0.5237203", "0.5223409", "0.5189958", "0.51840895", "0.5181425", "0.51621175", "0.5155834", "0.5142169", "0.51398504", "0.51282984", "0.5108469", "0.50997794", "0.50985783", "0.5085598", "0.49962172", "0.49918044", "0.49884695", "0.49764773", "0.4970087", "0.4970016", "0.49682957", "0.49663636", "0.49574155", "0.49529424", "0.49426302", "0.49407223", "0.49367616", "0.49165815", "0.4916136", "0.49084637", "0.48991436", "0.4893503", "0.48832005", "0.48771796", "0.48763016", "0.48724738", "0.48562586", "0.4854652", "0.48492587", "0.4848161", "0.48463285", "0.48458788", "0.4844984", "0.48439845", "0.48382083", "0.48359475", "0.4833266", "0.4829893", "0.4825633", "0.48218223", "0.4821382", "0.48178512", "0.48169968", "0.4815752", "0.48102522", "0.48091105", "0.4807211", "0.4804855", "0.48014528", "0.4797336", "0.4797336", "0.47800985", "0.4778924", "0.47754005", "0.47635478", "0.47603145", "0.47553167", "0.4754542", "0.47523844", "0.47511566", "0.47445586", "0.4740304", "0.47398564", "0.47391215", "0.47315216", "0.47297952", "0.4724853", "0.47223893", "0.47195998", "0.47180805", "0.47139734", "0.4712855" ]
0.6637285
0
returns star config object for given KOI
def star_config(koi, bands=['g','r','i','z','J','H','K'], unc=dict(g=0.05, r=0.05, i=0.05, z=0.05, J=0.02, H=0.02, K=0.02), **kwargs): folder = os.path.join(KOI_FPPDIR, ku.koiname(koi)) if not os.path.exists(folder): os.makedirs(folder) config = ConfigObj(os.path.join(folder,'star.ini')) koi = ku.koiname(koi) maxAV = koi_maxAV(koi) config['maxAV'] = maxAV mags = ku.KICmags(koi) for band in bands: if not np.isnan(mags[band]): config[band] = (mags[band], unc[band]) config['Kepler'] = mags['Kepler'] kepid = KOIDATA.ix[koi,'kepid'] if use_property(kepid, 'teff'): teff, e_teff = (kicu.DATA.ix[kepid, 'teff'], kicu.DATA.ix[kepid, 'teff_err1']) if not any(np.isnan([teff, e_teff])): config['Teff'] = (teff, e_teff) if use_property(kepid, 'logg'): logg, e_logg = (kicu.DATA.ix[kepid, 'logg'], kicu.DATA.ix[kepid, 'logg_err1']) if not any(np.isnan([logg, e_logg])): config['logg'] = (logg, e_logg) if use_property(kepid, 'feh'): feh, e_feh = (kicu.DATA.ix[kepid, 'feh'], kicu.DATA.ix[kepid, 'feh_err1']) if not any(np.isnan([feh, e_feh])): config['feh'] = (feh, e_feh) for kw,val in kwargs.items(): config[kw] = val return config
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fpp_config(koi, **kwargs):\n folder = os.path.join(KOI_FPPDIR, ku.koiname(koi))\n if not os.path.exists(folder):\n os.makedirs(folder)\n config = ConfigObj(os.path.join(folder,'fpp.ini'))\n\n koi = ku.koiname(koi)\n\n rowefit = jrowe_fit(koi)\n\n config['name'] = koi\n ra,dec = ku.radec(koi)\n config['ra'] = ra\n config['dec'] = dec\n config['rprs'] = rowefit.ix['RD1','val']\n config['period'] = rowefit.ix['PE1', 'val']\n\n config['starfield'] = kepler_starfield_file(koi)\n\n for kw,val in kwargs.items():\n config[kw] = val\n\n config['constraints'] = {}\n config['constraints']['maxrad'] = default_r_exclusion(koi)\n try:\n config['constraints']['secthresh'] = pipeline_weaksec(koi)\n except NoWeakSecondaryError:\n pass\n\n return config", "def get_pixis_config_object(env, src):\n cfg = env.configStore()\n\n o = cfg.get(_psana.Pixis.ConfigV1, src)\n if o is not None: return o\n\n return None", "def get_confg(self):\n\n ini = ConfigParser()\n self.config_parser = ini\n # if isinstance(cfile, (file, StringIO.StringIO, io.BytesIO)):\n if isinstance(self.config_data, str) and self.config_data:\n fp = io.BytesIO(self.config_data)\n ini.readfp(fp)\n elif self.config_file is not None:\n ini.read([self.config_file, os.path.expanduser('~/.' + self.config_file)])\n\n if ini.has_section('whoshere'):\n return ini.items('whoshere')\n\n return {}", "def get_istar_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.iStar.ConfigV1, src)\n if o is not None: return o\n\n return None", "def cg_config():\n return {}", "def get_epix10ka_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Epix.Config10kaV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaV1, src)\n if o is not None: return o\n\n return None", "def config(self):\n annotations = IAnnotations(self.context)\n return annotations.get(CONFIGURATION_KEY, {})", "def get_opal1k_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Opal1k.ConfigV1, src)\n if o is not None: return o\n\n return None", "def get_epix10ka_any_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Epix.Config10ka2MV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10ka2MV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaQuadV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaQuadV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaV1, src)\n if o is not None: return o\n\n return None", "def get_config(self):\n return {}", "def get_iAF1260b_config():\n package_path = get_package_path()\n metabolism_file = os.path.join(package_path, 'bigg_models', 'iAF1260b.json')\n return {'model_path': metabolism_file}", "def config(self):\n return {}", "def get_config(self):\n if self.allow_reco():\n return self.chs_config()\n else:\n return self.get_config_j(self.id)", "def get_epix_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Epix.Config100aV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config100aV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10ka2MV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10ka2MV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaQuadV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaQuadV1, src)\n if o is not None: return o\n\n o = 
cfg.get(_psana.Epix.Config10kaV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10KV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.ConfigV1, src)\n if o is not None: return o\n\n return None", "def get_epix10ka2m_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Epix.Config10ka2MV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10ka2MV1, src)\n if o is not None: return o\n\n return None", "def get_config_object() -> \"BaseConfig\":\n assert (\n len(G_CONFIG_OBJECT) == 1\n ), \"Have you created quantize config object before calling `quantize_model`?\"\n if G_CONFIG_OBJECT:\n return G_CONFIG_OBJECT[0]", "def get_acqiris_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Acqiris.ConfigV1, src)\n if o is not None: return o\n\n return None", "def configuration():", "def istio_config(self) -> Optional[pulumi.Input['IstioConfigArgs']]:\n return pulumi.get(self, \"istio_config\")", "def get_config_template(self) -> cconfig.Config:", "def get_uxi_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Uxi.ConfigV1, src)\n if o is not None: return o\n\n return None", "def get_config(self):\n return {'name': self.name, 'dtype': self.dtype}", "def get_config(self):\n return {\"name\": self.name, \"tunable\": self.tunable}", "def get_princeton_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Princeton.ConfigV5, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Princeton.ConfigV4, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Princeton.ConfigV3, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Princeton.ConfigV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Princeton.ConfigV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Pimax.ConfigV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Pixis.ConfigV1, src)\n if o is not None: return o\n\n return None", "def config(self):\n return self.namespace['config']", "def get_default_config(self):\n if not self.iface_type:\n return None\n\n defaults = {}\n defaults['description'] = self.interface_name + ' Interface'\n defaults['admin'] = 'up'\n if self.is_ethernet:\n defaults['speed'] = 'auto'\n defaults['duplex'] = 'auto'\n defaults['type'] = 'bridged'\n elif self.iface_type == 'Bridge-Aggregation':\n defaults['type'] = 'bridged'\n else:\n defaults['type'] = 'routed'\n\n return defaults", "def get_e_coli_core_config():\n package_path = get_package_path()\n metabolism_file = os.path.join(package_path, 'bigg_models', 'e_coli_core.json')\n return {'model_path': metabolism_file}", "def _get_MindtPy_GOA_config():\n CONFIG = ConfigBlock('MindtPy-GOA')\n\n _add_common_configs(CONFIG)\n _add_goa_configs(CONFIG)\n _add_oa_cuts_configs(CONFIG)\n _add_subsolver_configs(CONFIG)\n _add_tolerance_configs(CONFIG)\n _add_bound_configs(CONFIG)\n return CONFIG", "def config(self) -> Dict[str, Any]:", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n 
return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def config():\n return _config", "def get_config():\n return CONFIG" ]
[ "0.60090053", "0.5942401", "0.5920423", "0.5912436", "0.5829198", "0.55174524", "0.55119944", "0.54936326", "0.5490124", "0.5460084", "0.54068804", "0.5390276", "0.5330812", "0.5309186", "0.53012615", "0.52819467", "0.52716434", "0.526362", "0.52570665", "0.52288973", "0.5213522", "0.5205488", "0.5160786", "0.5146575", "0.5120113", "0.5111574", "0.51110494", "0.5104246", "0.51017493", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.508643", "0.5077956", "0.50718355" ]
0.6899527
0
returns config object for given KOI
def fpp_config(koi, **kwargs):
    folder = os.path.join(KOI_FPPDIR, ku.koiname(koi))
    if not os.path.exists(folder):
        os.makedirs(folder)

    config = ConfigObj(os.path.join(folder,'fpp.ini'))

    koi = ku.koiname(koi)
    rowefit = jrowe_fit(koi)

    config['name'] = koi
    ra,dec = ku.radec(koi)
    config['ra'] = ra
    config['dec'] = dec
    config['rprs'] = rowefit.ix['RD1','val']
    config['period'] = rowefit.ix['PE1', 'val']
    config['starfield'] = kepler_starfield_file(koi)

    for kw,val in kwargs.items():
        config[kw] = val

    config['constraints'] = {}
    config['constraints']['maxrad'] = default_r_exclusion(koi)
    try:
        config['constraints']['secthresh'] = pipeline_weaksec(koi)
    except NoWeakSecondaryError:
        pass

    return config
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_confg(self):\n\n ini = ConfigParser()\n self.config_parser = ini\n # if isinstance(cfile, (file, StringIO.StringIO, io.BytesIO)):\n if isinstance(self.config_data, str) and self.config_data:\n fp = io.BytesIO(self.config_data)\n ini.readfp(fp)\n elif self.config_file is not None:\n ini.read([self.config_file, os.path.expanduser('~/.' + self.config_file)])\n\n if ini.has_section('whoshere'):\n return ini.items('whoshere')\n\n return {}", "def config(self):\n annotations = IAnnotations(self.context)\n return annotations.get(CONFIGURATION_KEY, {})", "def get_epix10ka_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Epix.Config10kaV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaV1, src)\n if o is not None: return o\n\n return None", "def get_opal1k_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Opal1k.ConfigV1, src)\n if o is not None: return o\n\n return None", "def get_config(self):\n if self.allow_reco():\n return self.chs_config()\n else:\n return self.get_config_j(self.id)", "def get_config(self):\n return {\"name\": self.name, \"tunable\": self.tunable}", "def get_config(self):\n return {}", "def star_config(koi, bands=['g','r','i','z','J','H','K'],\n unc=dict(g=0.05, r=0.05, i=0.05, z=0.05,\n J=0.02, H=0.02, K=0.02), **kwargs):\n folder = os.path.join(KOI_FPPDIR, ku.koiname(koi))\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n config = ConfigObj(os.path.join(folder,'star.ini'))\n\n koi = ku.koiname(koi)\n\n maxAV = koi_maxAV(koi)\n config['maxAV'] = maxAV\n\n mags = ku.KICmags(koi)\n for band in bands:\n if not np.isnan(mags[band]):\n config[band] = (mags[band], unc[band])\n config['Kepler'] = mags['Kepler']\n\n kepid = KOIDATA.ix[koi,'kepid']\n\n if use_property(kepid, 'teff'):\n teff, e_teff = (kicu.DATA.ix[kepid, 'teff'],\n kicu.DATA.ix[kepid, 'teff_err1'])\n if not any(np.isnan([teff, e_teff])):\n config['Teff'] = (teff, e_teff)\n\n if use_property(kepid, 'logg'):\n logg, e_logg = (kicu.DATA.ix[kepid, 'logg'],\n kicu.DATA.ix[kepid, 'logg_err1'])\n if not any(np.isnan([logg, e_logg])):\n config['logg'] = (logg, e_logg)\n\n if use_property(kepid, 'feh'):\n feh, e_feh = (kicu.DATA.ix[kepid, 'feh'],\n kicu.DATA.ix[kepid, 'feh_err1'])\n if not any(np.isnan([feh, e_feh])):\n config['feh'] = (feh, e_feh)\n\n for kw,val in kwargs.items():\n config[kw] = val\n\n return config", "def get_config_template(self) -> cconfig.Config:", "def config():\n return _config", "def get_uxi_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Uxi.ConfigV1, src)\n if o is not None: return o\n\n return None", "def get_acqiris_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Acqiris.ConfigV1, src)\n if o is not None: return o\n\n return None", "def istio_config(self) -> Optional[pulumi.Input['IstioConfigArgs']]:\n return pulumi.get(self, \"istio_config\")", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return 
self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def get_config():\n return _config", "def load_config(self):\n with h5py.File(iop.fields, 'r') as hf:\n # fields = hf.get('data')[:]\n config = hf.get('config')[:]\n return config", "def config(self):\n return {}", "def config():\n return Config()", "def config():\n return Config()", "def config(self):\n return self.namespace['config']", "def get_config(self):\n E = data_element_maker()\n top = E.top(\n E.Ifmgr(\n E.Interfaces(\n E.Interface(\n E.IfIndex(self.iface_index)\n )\n )\n )\n )\n\n nc_get_reply = self.device.get(('subtree', top))\n reply_data = find_in_data(self._iface_row_name, nc_get_reply.data_ele)\n\n if reply_data is None:\n return {}\n\n return data_elem_to_dict(reply_data, self._key_map, value_map=self._value_map)", "def config(self) -> Dict[str, Any]:", "def 
get_config_object() -> \"BaseConfig\":\n assert (\n len(G_CONFIG_OBJECT) == 1\n ), \"Have you created quantize config object before calling `quantize_model`?\"\n if G_CONFIG_OBJECT:\n return G_CONFIG_OBJECT[0]", "def get_epix_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Epix.Config100aV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config100aV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10ka2MV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10ka2MV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaQuadV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaQuadV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10KV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.ConfigV1, src)\n if o is not None: return o\n\n return None", "def config(self):\n return self[CONFIG_KEY]", "def get_config():\n return CONFIG", "def get_istar_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.iStar.ConfigV1, src)\n if o is not None: return o\n\n return None", "def get_config(self):\n return {'name': self.name, 'dtype': self.dtype}", "def get_epix10ka2m_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Epix.Config10ka2MV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10ka2MV1, src)\n if o is not None: return o\n\n return None", "def cg_config():\n return {}", "def get_pixis_config_object(env, src):\n cfg = env.configStore()\n\n o = cfg.get(_psana.Pixis.ConfigV1, src)\n if o is not None: return o\n\n return None", "def get_configuration(self) -> dict:\n return self.config" ]
[ "0.64626646", "0.6120666", "0.6065999", "0.6041188", "0.6032176", "0.599006", "0.5987609", "0.5959099", "0.59461635", "0.59364283", "0.5936055", "0.59172744", "0.59112185", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.59012616", "0.5870811", "0.5866335", "0.5862595", "0.5862595", "0.5859725", "0.58510786", "0.5848893", "0.5839737", "0.5839378", "0.5835709", "0.5832539", "0.5832016", "0.58251214", "0.579824", "0.5785709", "0.57646376", "0.5759715" ]
0.6510853
0
Predict a single batch of images, optionally with augmentation. Augmentations are vectorized across the entire batch and the predictions are averaged.
def predict_batch(self, imgs_batch, augment=False):

    if augment:

        aug_funcs = [
            lambda x: x, # identity
            lambda x: x[:, ::-1, ...], # vflip
            lambda x: x[:, :, ::-1], # hflip
            lambda x: np.rot90(x, 1, axes=(1, 2)), # +90
            lambda x: np.rot90(x, 2, axes=(1, 2)), # +180
            lambda x: np.rot90(x, 3, axes=(1, 2)), # +270
            lambda x: np.rot90(x, 1, axes=(1, 2))[:, ::-1, ...], # vflip(+90)
            lambda x: np.rot90(x, 1, axes=(1, 2))[:, :, ::-1] # hflip(+90)
        ]

        yp = np.zeros((imgs_batch.shape[0], len(TAGS)))
        for aug_func in aug_funcs:
            imgs_batch = aug_func(imgs_batch)
            tags_batch = self.net.predict(imgs_batch)
            yp += tags_batch / len(aug_funcs)
        return yp

    else:
        return self.net.predict_on_batch(imgs_batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict(self, images, batch_size):\n pass", "def warmup_predict(model, imgs, Npred):\n H = augmented_state_matrix(model[:-1], imgs, 0)\n h0 = H[-2]\n y0 = imgs[-1]\n return predict(model, y0, h0, Npred)", "def predict_on_batch(self, input_batch):\n from deeplift.util import run_function_in_batches\n from deeplift.util import compile_func\n x_standardized = self.model._batch_to_list(input_batch)\n if self.fwd_predict_fn is None:\n # TODO: Once DeepLIFT layer annotation works integrate it here too:\n \"\"\"\n # identify model output layers:\n self.output_layers_idxs = []\n for output_name in self.model.model.output_names:\n for i, l in enumerate(self.model.model.layers):\n if l.name == output_name:\n self.output_layers_idxs.append(i)\n \"\"\"\n inputs = [self.deeplift_model.get_layers()[i].get_activation_vars()\n for i in self.input_layer_idxs]\n outputs = [self.deeplift_model.get_layers()[i].get_activation_vars()\n for i in self.output_layers_idxs]\n self.fwd_predict_fn = compile_func(inputs, outputs)\n\n preds = run_function_in_batches(\n input_data_list=x_standardized,\n func=self.fwd_predict_fn,\n batch_size=self.batch_size,\n progress_update=None)\n\n preds = np.array(preds)\n if len(self.output_layers_idxs) == 1:\n preds = preds[0, ...]\n\n return preds", "def predict(self, X, pred_batch_size=None):", "def batched_predict(model, batcher, batch_size, int_mapped_X, doc_labels):\n # Intialize batcher but dont shuffle.\n train_batcher = batcher(full_X=int_mapped_X, full_y=doc_labels,\n batch_size=batch_size, shuffle=False)\n preds = []\n for batch_X, _ in train_batcher.next_batch():\n batch_preds = model.predict(batch_X=batch_X)\n preds.append(batch_preds)\n preds = np.hstack(preds)\n return preds", "def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> np.ndarray:\n raise NotImplementedError", "def predict(self):\n batch = get_predict_batch(1, num_rec_out=self.num_test_rec)\n self.g_model.test_batch(\n batch, self.global_step, num_rec_out=self.num_test_rec)", "def model_predict(img, model, preprocess_func):\n img = img.resize((224, 224)) # Each model expects shape: (224, 224, 3)\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n\n x = preprocess_func(x)\n preds = model.predict(x)\n return preds", "def predict_all(self, imgs):\n return self._predict(imgs)", "def predict_on_batch(engine, batch):\n\t\tengine.model.eval()\n\t\tengine.model.rpn.nms_thresh = 0.3\n\t\twith torch.no_grad():\n\t\t\timgs, target = prepare_batch(batch, device=get_device(engine.model))\n\t\t\ty_pred = engine.model(imgs)\n\t\treturn y_pred, target", "def predict(self, dataset, transformers=[]):\n y_preds = []\n n_tasks = self.get_num_tasks()\n ind = 0\n\n for (X_batch, _, _, ids_batch) in dataset.iterbatches(\n self.batch_size, deterministic=True):\n n_samples = len(X_batch)\n y_pred_batch = self.predict_on_batch(X_batch)\n # Discard any padded predictions\n y_pred_batch = y_pred_batch[:n_samples]\n y_pred_batch = np.reshape(y_pred_batch, (n_samples, n_tasks))\n y_pred_batch = undo_transforms(y_pred_batch, transformers)\n y_preds.append(y_pred_batch)\n y_pred = np.vstack(y_preds)\n\n # The iterbatches does padding with zero-weight examples on the last batch.\n # Remove padded examples.\n n_samples = len(dataset)\n y_pred = np.reshape(y_pred, (n_samples, n_tasks))\n # Special case to handle singletasks.\n if n_tasks == 1:\n y_pred = np.reshape(y_pred, (n_samples,))\n return y_pred", "def predict(self, inputs: Tuple[Tensor], batch_img_metas: List[dict],\n test_cfg: ConfigType):\n if 
self.use_se_loss:\n seg_logits = self.forward(inputs)[0]\n else:\n seg_logits = self.forward(inputs)\n return self.predict_by_feat(seg_logits, batch_img_metas)", "def infer(self, x, batch_size=None, **kwargs):\n if not batch_size:\n batch_size = self.batch_size\n return self.model.predict(x, batch_size, **kwargs)", "def predict(predict_var, x_unlabeled, inputs, batch_sizes, view_size):\n x = x_unlabeled\n\n # calculate batches for predict loop\n unlabeled_batch_size = batch_sizes.get(\"Embedding\", 0)\n batch_size = min(len(x[0]), unlabeled_batch_size)\n batches = make_batches(len(x[0]), batch_size)\n\n y_preds = []\n # predict over all points\n for j, (batch_start, batch_end) in enumerate(batches):\n feed_dict = {K.learning_phase(): 0}\n # feed corresponding input for each input_type\n for input_type, input_placeholder in inputs.items():\n if input_type == \"Embedding\":\n for i in range(view_size):\n feed_dict[input_placeholder[i]] = x[i][batch_start:batch_end]\n elif input_type == \"Orthogonal\":\n batch_ids = np.random.choice(\n len(x), size=min(len(x), batch_sizes[input_type]), replace=False\n )\n for i in range(view_size):\n feed_dict[input_placeholder[i]] = x[i][batch_ids]\n else:\n raise Exception(\"Unrecognized feed name ['{}']\".format(input_type))\n # evaluate the batch\n y_pred_batch = np.asarray(K.get_session().run(predict_var, feed_dict=feed_dict))\n y_preds.append(y_pred_batch)\n y_list = np.concatenate(y_preds, axis=1)\n\n return y_list", "def predict(model, img, target_size=(229, 229)): #fixed size for InceptionV3 architecture\r\n if img.size != target_size:\r\n img = img.resize(target_size)\r\n\r\n x = image.img_to_array(img)\r\n x = np.expand_dims(x, axis=0)\r\n x = preprocess_input(x)\r\n preds = model.predict(x)\r\n return preds[0]", "def predict(self, x, logits=False, batch_size=128):\n\n # Apply defences\n x_preproc = self._apply_processing(x)\n x_preproc, _ = self._apply_defences(x_preproc, None, fit=False)\n\n # Run predictions with batching\n preds = np.zeros((x_preproc.shape[0], self.nb_classes))\n for batch_index in range(int(np.ceil(x_preproc.shape[0] / float(batch_size)))):\n begin, end = batch_index * batch_size, min((batch_index + 1) * batch_size, x_preproc.shape[0])\n preds[begin:end] = self._preds([x_preproc[begin:end]])[0]\n\n if not logits and not self._custom_activation:\n exp = np.exp(preds[begin:end] - np.max(preds[begin:end], axis=1, keepdims=True))\n preds[begin:end] = exp / np.sum(exp, axis=1, keepdims=True)\n\n return preds", "def predict_batch(self, model, context, data=None):\n pass", "def predict(self,Xpred, nsamples=2000, tune=100, progress=True, points2=[]):\n if self.type_y=='affine':\n return self.predict_affine(Xpred, nsamples, tune, progress, points2)\n elif self.type_y=='regression':\n return self.predict_regression(Xpred, nsamples, tune, progress, points2)\n elif self.type_y=='mixed':\n return self.predict_mixed(Xpred, nsamples, tune, progress, points2)", "def predict_step(self, *args: Any, **kwargs: Any) -> Tensor:\n batch = args[0]\n x = batch[\"image\"]\n y_hat: Tensor = self(x).softmax(dim=1)\n return y_hat", "def predict_dataset(filenames, path, model, model_preprocess_function):\n y_predicted = []\n batch_size = 32\n batch = []\n for filename in filenames:\n batch.append(preprocess(path+filename, model_preprocess_function))\n if len(batch) >= batch_size:\n y_predicted = y_predicted + model.predict(np.array(batch)).tolist()\n batch = []\n y_predicted = y_predicted + model.predict(np.array(batch)).tolist()\n return y_predicted", 
"def predict(model, images):\n return model.predict_classes(images)", "def predict(self, inputs, oversample=True):\r\n # Scale to standardize input dimensions.\r\n input_ = np.zeros((len(inputs),\r\n self.image_dims[0],\r\n self.image_dims[1],\r\n inputs[0].shape[2]),\r\n dtype=np.float32)\r\n print inputs[0].shape\r\n print input_.shape\r\n for ix, in_ in enumerate(inputs):\r\n input_[ix] = caffe.io.resize_image(in_, self.image_dims)\r\n\r\n # if oversample:\r\n # # Generate center, corner, and mirrored crops.\r\n # input_ = caffe.io.oversample(input_, self.crop_dims)\r\n # else:\r\n # # Take center crop.\r\n # center = np.array(self.image_dims) / 2.0\r\n # crop = np.tile(center, (1, 2))[0] + np.concatenate([\r\n # -self.crop_dims / 2.0,\r\n # self.crop_dims / 2.0\r\n # ])\r\n # crop = crop.astype(int)\r\n # input_ = input_[:, crop[0]:crop[2], crop[1]:crop[3], :]\r\n\r\n # Classify\r\n caffe_in = np.zeros(np.array(input_.shape)[[0, 3, 1, 2]],\r\n dtype=np.float32)\r\n for ix, in_ in enumerate(input_):\r\n caffe_in[ix] = self.transformer.preprocess(self.inputs[0], in_)\r\n out = self.forward_all(**{self.inputs[0]: caffe_in})\r\n predictions = out[self.outputs[0]]\r\n\r\n # # For oversampling, average predictions across crops.\r\n # if oversample:\r\n # predictions = predictions.reshape((len(predictions) / 10, 10, -1))\r\n # predictions = predictions.mean(1)\r\n\r\n return predictions", "def predict(model, img, target_size):\n if img.size != target_size:\n img = img.resize(target_size)\n\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n preds = model.predict(x)\n return preds[0]", "def predict(model, img, target_size):\n if img.size != target_size:\n img = img.resize(target_size)\n\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n preds = model.predict(x)\n return preds[0]", "def predict(self, x, **kwargs):\n kwargs = self.filter_sk_params(Sequential.predict, kwargs)\n return np.squeeze(self.model.predict(x, **kwargs))", "def predict(trained_model, X_test, y_test, image_name):\n if MODEL == 1:\n return predict_1(trained_model, X_test, y_test)\n elif MODEL == 3:\n if CROSS_VALIDATION:\n return cv_predict_3(trained_model, X_test, y_test)\n else:\n return predict_3(trained_model, X_test, y_test, image_name)\n elif MODEL == 2:\n return predict_2(trained_model, X_test, y_test)\n else:\n # For models 4, 5 and 6\n return predict_4(trained_model, X_test, y_test)", "def predict_batches(model, X, batchsize=None):\n if batchsize is None:\n batchsize = model.flags.bs\n pred = []\n for batch in grouper(X, batchsize):\n pred.append(model.predict(np.array(batch)))\n\n return np.concatenate(pred)", "def prediction(self, X):\n images = self.preprocess_images(X)\n return self.model.predict(images)", "def predict(self):\n self.predicted_test_summary = []\n for step in xrange(0, self.test_size // self.test_batch_size):\n print 'Predicting Batch No.:', step\n offset = (step * self.test_batch_size) % self.test_size\n batch_data_fwd = self.X_tst_fwd[offset:(offset + self.test_batch_size), :].T\n batch_data_bwd = self.X_tst_bwd[offset:(offset + self.test_batch_size), :].T\n summary_test_out = self._predict_batch(batch_data_fwd, batch_data_bwd)\n self.predicted_test_summary.extend(summary_test_out)\n\n print 'Prediction Complete. 
Moving Forward..'\n\n # test answers\n self.test_review = self.X_tst_fwd\n self.predicted_test_summary = self.predicted_test_summary\n self.true_summary = self.Y_tst", "def predict(model, img):\n\tx = image.img_to_array(img)\n\tx = np.expand_dims(x, axis=0)\n\tx = preprocess_input(x)\n\tpreds = model.predict(x)\n\treturn preds[0]", "def predict(self, X, pred_batch_size=None):\n predictions = self.decision_function(X, pred_batch_size)\n return predictions", "def predict(self, x, batch_size=None, verbose=0,\n steps=None, callbacks=None, max_queue_size=10, workers=1,\n use_multiprocessing=False):\n sz = x.shape\n x = np.reshape(x, (sz[0]*sz[1]*sz[2], sz[3]))\n predicted_output = self.model.predict(\n x=x, batch_size=batch_size, verbose=verbose, steps=steps,\n callbacks=callbacks, max_queue_size=max_queue_size,\n workers=workers, use_multiprocessing=use_multiprocessing)\n predicted_output = np.float32(np.reshape(predicted_output,\n (sz[0], sz[1], sz[2], sz[3])))\n return predicted_output", "def predict_on_batch(self, x):\n # TODO: Understand how pytorch models could return multiple outputs\n import torch\n from torch.autograd import Variable\n\n if isinstance(x, np.ndarray):\n # convert to a pytorch tensor and then to a pytorch variable\n input = self._torch_var(torch.from_numpy(self.correct_neg_stride(x)))\n pred = self.model(input)\n\n elif isinstance(x, dict):\n # convert all entries in the dict to pytorch variables\n input_dict = {k: self._torch_var(torch.from_numpy(self.correct_neg_stride(x[k]))) for k in x}\n pred = self.model(**input_dict)\n\n elif isinstance(x, list):\n # convert all entries in the list to pytorch variables\n input_list = [self._torch_var(torch.from_numpy(self.correct_neg_stride(el))) for el in x]\n pred = self.model(*input_list)\n\n else:\n raise Exception(\"Input not supported!\")\n\n # convert results back to numpy arrays\n if isinstance(pred, Variable):\n pred_np = self._torch_var_to_numpy(pred)\n\n elif isinstance(pred, dict):\n pred_np = {k: self._torch_var_to_numpy(pred[k]) for k in pred}\n\n elif isinstance(pred, list) or isinstance(pred, tuple):\n pred_np = [self._torch_var_to_numpy(el) for el in pred]\n\n else:\n raise Exception(\"Model output format not supported!\")\n\n return pred_np", "def predict(self, data, version='default'):\n if isinstance(data, list):\n inputs = [self._indarray(x) for x in data]\n else:\n inputs = [self._indarray(data)]\n\n classification_response = self.skil.api.multipredict(\n deployment_name=self.deployment.name,\n model_name=self.model_name,\n version_name=version,\n body=skil_client.MultiPredictRequest(\n id=str(uuid.uuid1()),\n needs_pre_processing=False,\n inputs=inputs\n )\n )\n outputs = classification_response.outputs\n outputs = [np.asarray(o.data).reshape(o.shape) for o in outputs]\n if len(outputs) == 1:\n return outputs[0]\n return outputs", "def predict(\n self,\n *args,\n batch: Optional[List[Dict[str, Any]]] = None,\n add_tokens: bool = False,\n add_attributions: bool = False,\n attributions_kwargs: Optional[Dict] = None,\n **kwargs,\n ) -> Union[Dict[str, numpy.ndarray], List[Dict[str, numpy.ndarray]]]:\n if args or kwargs:\n batch = [self._map_args_kwargs_to_input(*args, **kwargs)]\n\n prediction_config = PredictionConfiguration(\n add_tokens=add_tokens,\n add_attributions=add_attributions,\n attributions_kwargs=attributions_kwargs or {},\n )\n\n predictions = self._model.predict(batch, prediction_config)\n\n predictions_dict = [prediction.as_dict() for prediction in predictions]\n\n return predictions_dict[0] if 
(args or kwargs) else predictions_dict", "def predict_single(self, data, version='default'):\n if isinstance(data, list):\n inputs = [self._indarray(np.expand_dims(x, 0)) for x in data]\n else:\n inputs = [self._indarray(np.expand_dims(data, 0))]\n\n classification_response = self.skil.api.multipredict(\n deployment_name=self.deployment.name,\n model_name=self.model_name,\n version_name=version,\n body=skil_client.MultiPredictRequest(\n id=str(uuid.uuid1()),\n needs_pre_processing=False,\n inputs=inputs\n )\n )\n # TODO should support multi-out\n output = classification_response.outputs[0]\n return np.asarray(output.data).reshape(output.shape)", "def predict(self, batch_inputs_dict: dict, batch_data_samples: SampleList,\n **kwargs) -> SampleList:\n x = self.extract_feat(batch_inputs_dict)\n results_list = self.bbox_head.predict(x, batch_data_samples, **kwargs)\n predictions = self.add_pred_to_datasample(batch_data_samples,\n results_list)\n return predictions", "def batch_predict(filenames, net):\n N, C, H, W = net.blobs[net.inputs[0]].data.shape\n F = net.blobs[net.outputs[0]].data.shape[1]\n Nf = len(filenames)\n allftrs = np.zeros((Nf, F))\n #allpreds = []\n for i in range(0, Nf, N):\n tic = time.time()\n in_data = np.zeros((N, C, H, W), dtype=np.float32)\n\n batch_range = range(i, min(i+N, Nf))\n batch_filenames = [filenames[j] for j in batch_range]\n Nb = len(batch_range)\n\n batch_images = np.zeros((Nb, 3, H, W))\n for j,fname in enumerate(batch_filenames):\n im = np.array(Image.open(fname))\n \n if len(im.shape) == 2:\n im = np.tile(im[:,:,np.newaxis], (1,1,3))\n # RGB -> BGR\n im = im[:,:,(2,1,0)]\n # mean subtraction\n im = im - np.array([103.939, 116.779, 123.68])\n # resize\n im = imresize(im, (H, W))\n # get channel in correct dimension\n im = np.transpose(im, (2, 0, 1))\n batch_images[j,:,:,:] = im\n\n # insert into correct place\n in_data[0:len(batch_range), :, :, :] = batch_images\n \n # predict features\n ftrs = predict(in_data, net)\n toc = time.time()\n \n for j in range(len(batch_range)):\n allftrs[i+j,:] = ftrs[j,:]\n\n return allftrs", "def predict(self, data: np.array) -> np.array:\n return self.model.predict(squeeze_keep_batch(data))", "def predict(self, images: ImageSource, conf: Optional[float] = None, batch_size: int = 32, fuse_model: bool = True) -> ImagesPoseEstimationPrediction:\n pipeline = self._get_pipeline(conf=conf, fuse_model=fuse_model)\n return pipeline(images, batch_size=batch_size) # type: ignore", "def infer(trainer, data_dir, patch_size, output_dir=None, device='cpu'):\n\n if output_dir is not None and not osp.exists(output_dir):\n os.mkdir(output_dir)\n\n data_dir = Path(data_dir).expanduser()\n img_paths = list((data_dir / 'images').iterdir())\n\n print(f'Predicting {len(img_paths)} images from {data_dir} ...')\n predictions = [\n predict(trainer, img_path, patch_size, device=device)\n for img_path in tqdm(img_paths)\n ]\n\n if output_dir is not None:\n save_predictions(predictions, img_paths, output_dir)\n\n return predictions", "def predict(self, inputs, continuation=True):\n\n if inputs.ndim < 2:\n inputs = np.reshape(inputs, (-1, len(inputs)))\n\n # set noise term to zero for state update during test\n self.noise = 0\n\n T_test = inputs.shape[-1]\n\n if continuation:\n last_state = self.last_state\n last_input = self.last_input\n last_output = self.last_output\n else:\n last_state = np.zeros(self.n_reservoir)\n last_input = np.zeros(self.n_inputs)\n last_output = np.zeros(self.n_outputs)\n\n inputs = np.hstack([last_input.reshape(-1, 1), 
inputs])\n states = np.hstack([last_state.reshape(-1, 1), np.zeros((self.n_reservoir, T_test))])\n outputs = np.hstack([last_output.reshape(-1, 1), np.zeros((self.n_outputs, T_test))])\n\n # process test set one sample at a time\n for t in range(T_test):\n # next state\n states[:, t + 1] = self._update(states[:, t], inputs[:, t])\n # predicted output\n outputs[:, t + 1] = self.read_out(states[:, t + 1])\n\n # stack up new states\n self.states = np.hstack((self.states, states[:, 1:]))\n\n return outputs[:, 1:]", "def predict_once(\n self, x: np.ndarray, batch_size: int = 128, return_nbest=False,return_alignments=False, return_confidence=False, **kwargs\n ) -> Union[Tuple[np.ndarray, np.ndarray], np.ndarray]:\n import torch # lgtm [py/repeated-import]\n \n x_ = np.array([x_i for x_i in x] + [np.array([0.1]), np.array([0.1, 0.2])])[:-2]\n\n # Put the model in the eval mode\n self._model.eval()\n # Apply preprocessing\n x_preprocessed, _ = self._apply_preprocessing(x_, y=None, fit=False)\n\n # Transform x into the model input space\n inputs, targets, input_rates, target_sizes, batch_idx = self.transform_model_input(x=x_preprocessed)\n\n # Compute real input sizes\n input_sizes = input_rates.mul_(inputs.size()[-1]).int()\n\n # Run prediction with batch processing\n results = []\n result_output_sizes = np.zeros(x_preprocessed.shape[0], dtype=np.int)\n num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size)))\n\n for m in range(num_batch):\n # Batch indexes\n begin, end = (\n m * batch_size,\n min((m + 1) * batch_size, x_preprocessed.shape[0]),\n )\n\n # Call to DeepSpeech model for prediction\n with torch.no_grad():\n outputs, output_sizes = self._model(\n inputs[begin:end].to(self._device), input_sizes[begin:end].to(self._device)\n )\n\n results.append(outputs)\n result_output_sizes[begin:end] = output_sizes.detach().cpu().numpy()\n\n # Aggregate results\n result_outputs = np.zeros(\n (x_preprocessed.shape[0], result_output_sizes.max(), results[0].shape[-1]), dtype=np.float32\n )\n\n for m in range(num_batch):\n # Batch indexes\n begin, end = (\n m * batch_size,\n min((m + 1) * batch_size, x_preprocessed.shape[0]),\n )\n\n # Overwrite results\n result_outputs[begin:end, : results[m].shape[1], : results[m].shape[-1]] = results[m].cpu().numpy()\n\n # Rearrange to the original order\n result_output_sizes_ = result_output_sizes.copy()\n result_outputs_ = result_outputs.copy()\n result_output_sizes[batch_idx] = result_output_sizes_\n result_outputs[batch_idx] = result_outputs_\n if np.isnan(result_outputs).any():\n logger.warning(\"NaN output encountered ; reloading model weights.\")\n self.reload_model()\n # Check if users want transcription outputs\n transcription_output = kwargs.get(\"transcription_output\")\n if transcription_output is None or transcription_output is False:\n return (result_outputs, result_output_sizes), None, None \n \n # Now users want transcription outputs\n # Compute transcription\n\n decoded_output, offsets, scores = self.decoder.decode(\n torch.tensor(result_outputs, device=self._device), torch.tensor(result_output_sizes, device=self._device)\n )\n if not return_nbest:\n decoded_output = [do[0] for do in decoded_output]\n decoded_output = np.array(decoded_output)\n offsets = [ofs[0].cpu().numpy() for ofs in offsets]\n offsets = np.array(offsets)\n scores = [sc[0].cpu().numpy() for sc in scores]\n scores = np.array(scores)\n \n if return_alignments or return_confidence:\n results=[decoded_output] + ([offsets] if return_alignments else []) + ([scores] if 
return_confidence else [])\n return tuple(results)\n return decoded_output", "def predict_batch(self, states: np.ndarray):\n return self.model(states, training=False)", "def predict(self, images, batch_size=1):\n predictions = []\n \n for image in images.astype(\"float\"):\n filtered_image = self.apply_filter(image)\n _, pred = cv2.threshold(filtered_image.astype('uint8'), 0, 1, cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n predictions.append(pred)\n \n return np.reshape(predictions, images.shape)", "def predict(self, test_inputs, batch_size=None):\n if batch_size is None:\n num_batches = 1\n else:\n num_batches = util.ceil_divide(test_inputs.shape[0], batch_size)\n\n test_inputs = np.array_split(test_inputs, num_batches)\n pred_means = util.init_list(0.0, [num_batches])\n pred_vars = util.init_list(0.0, [num_batches])\n for i in range(num_batches):\n pred_means[i], pred_vars[i] = self.session.run(\n self.predictions, feed_dict={self.test_inputs: test_inputs[i]})\n\n return np.concatenate(pred_means, axis=0), np.concatenate(pred_vars, axis=0)", "def __predict_batch(self, model: AutoModel, batch: Tuple):\n input_ids_batch = batch[0]\n token_type_ids_batch = batch[1]\n attention_mask_batch = batch[2]\n\n output = model(\n input_ids=input_ids_batch,\n token_type_ids=token_type_ids_batch,\n attention_mask=attention_mask_batch,\n )\n\n logits = output.logits\n preds_batch = np.argmax(torch.softmax(logits, dim=1).detach().numpy(), axis=1)\n preds_batch_list = list(preds_batch)\n\n return preds_batch_list", "def predict(self, x, batch_size=1):\n assert self.model, \"Model was not initialized\"\n return self.model.predict(x, batch_size=batch_size)", "def predict(self, model, batch):\n device = list(model.parameters())[0].device\n batch = batch.to(device)\n inputs = batch.inputs\n # Extract features with the model\n h = model(*inputs)\n # predictions\n return self.predict_on_features(h)", "def predict_one_image(img_path, prediction_model):\n # Load image and resize it\n img = image.load_img(img_path, target_size=(224, 224))\n # Transform it in array\n x = image.img_to_array(img)\n # Expand array dimension\n x = np.expand_dims(x, axis=0)\n # Make prediction\n prediction_score = prediction_model.predict(x)\n return prediction_score", "def predict_images(self, img_paths):\n img_gen = ImageGenerator(img_paths, batch_size=1, shuffle=False, normalize='std_norm', augmentation=False)\n\n return self.model.predict_generator(img_gen, verbose=1)", "def predict(self, image):\n if len(image.shape) == 3:\n return self._predict_single(image)\n elif len(image.shape) == 4:\n return self._predict_batch(image)\n else:\n raise ValueError('Wrong image format.')", "def predict(trainer, img_path, patch_size, device='cpu'):\n\n img = imread(img_path)\n patches = divide_image_to_patches(img, patch_size)\n predictions = []\n\n for patch in patches:\n input_ = TF.to_tensor(Image.fromarray(patch)).to(device).unsqueeze(0)\n prediction = trainer.postprocess(trainer.model(input_))\n prediction = prediction.detach().cpu().numpy()\n predictions.append(prediction[..., np.newaxis])\n\n predictions = np.concatenate(predictions)\n\n return combine_patches_to_image(predictions, img.shape[0], img.shape[1])", "def predict_batch(net, batch):\n with torch.no_grad():\n out = net(batch)\n _, predicted = torch.max(out.data, 1)\n return predicted", "def batch_predict(\n self, batch_in: Union[Tuple[tf.Tensor, ...], Tuple[np.ndarray, ...]]\n ) -> Dict[Text, Union[tf.Tensor, Dict[Text, tf.Tensor]]]:\n if self.all_labels_embed is None:\n raise 
ValueError(\n \"The model was not prepared for prediction. \"\n \"Call `prepare_for_predict` first.\"\n )\n\n tf_batch_data = self.batch_to_model_data_format(\n batch_in, self.predict_data_signature\n )\n self._compute_dialogue_indices(tf_batch_data)\n\n dialogue_in, text_output, text_sequence_lengths = self._process_batch_data(\n tf_batch_data\n )\n (\n dialogue_embed,\n dialogue_mask,\n dialogue_transformer_output,\n attention_weights,\n ) = self._embed_dialogue(dialogue_in, tf_batch_data)\n dialogue_mask = tf.squeeze(dialogue_mask, axis=-1)\n\n sim_all, scores = self._tf_layers[\n f\"loss.{LABEL}\"\n ].get_similarities_and_confidences_from_embeddings(\n dialogue_embed[:, :, tf.newaxis, :],\n self.all_labels_embed[tf.newaxis, tf.newaxis, :, :],\n dialogue_mask,\n )\n\n predictions = {\n \"scores\": scores,\n \"similarities\": sim_all,\n DIAGNOSTIC_DATA: {\"attention_weights\": attention_weights},\n }\n\n if (\n self.config[ENTITY_RECOGNITION]\n and text_output is not None\n and text_sequence_lengths is not None\n ):\n pred_ids, confidences = self._batch_predict_entities(\n tf_batch_data,\n dialogue_transformer_output,\n text_output,\n text_sequence_lengths,\n )\n name = ENTITY_ATTRIBUTE_TYPE\n predictions[f\"e_{name}_ids\"] = pred_ids\n predictions[f\"e_{name}_scores\"] = confidences\n\n return predictions", "def predict_img_only(self,\n imgs: Tensor,\n batch_data_samples: List[Det3DDataSample],\n rescale: bool = True) -> List[InstanceData]:\n\n assert self.with_img_bbox, 'Img bbox head must be implemented.'\n assert self.with_img_backbone, 'Img backbone must be implemented.'\n assert self.with_img_rpn, 'Img rpn must be implemented.'\n assert self.with_img_roi_head, 'Img roi head must be implemented.'\n x = self.extract_img_feat(imgs)\n\n # If there are no pre-defined proposals, use RPN to get proposals\n if batch_data_samples[0].get('proposals', None) is None:\n rpn_results_list = self.img_rpn_head.predict(\n x, batch_data_samples, rescale=False)\n else:\n rpn_results_list = [\n data_sample.proposals for data_sample in batch_data_samples\n ]\n\n results_list = self.img_roi_head.predict(\n x, rpn_results_list, batch_data_samples, rescale=rescale)\n\n return results_list", "def predict(self, images):\n\t\t#testing_dataset = tf.data.Dataset.from_tensor_slices(images)\n\t\ttf.keras.backend.set_learning_phase(0)\n\t\ttesting_dataset = tf.data.Dataset.from_tensor_slices(np.asarray(images)).map(lambda x: tf.image.resize(x, [self.image_size, self.image_size]) / 255.0)\n\t\t#testing_dataset_shape = tf.data.Dataset.from_tensor_slices(np.full((len(images), 2), 500, dtype=np.int32))\n\t\ttesting_iterator_X = tf.data.Dataset.zip((testing_dataset, )).batch(self.batch_size).make_initializable_iterator()\n\n\t\tself.sess.run(testing_iterator_X.initializer)\n\t\ttesting_handle_X = self.sess.run(testing_iterator_X.string_handle())\n\n\t\tfinal_output = np.zeros([len(images), 500, 500, num_classes])\n\t\tj = 0\n\t\tcount = 0\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\t[test_output] = self.sess.run(\n\t\t\t\t\t[self.output],\n\t\t\t\t\t\tfeed_dict={\n\t\t\t\t\t\t\tself.is_training: False,\n\t\t\t\t\t\t\tself.handle_X: testing_handle_X,\n\t\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t\tthis_len = len(test_output)\n\t\t\t\tfor z in range(len(test_output)):\n\t\t\t\t\tfor dim in range(num_classes):\n\t\t\t\t\t\tfinal_output[count+z:count+z+1, :, :, dim] = scipy.misc.imresize(test_output[z, :, :, dim], [500, 500])\n\n\t\t\t\t#final_output[count:count+this_len, :, :, :] = test_output\n\t\t\t\tto = final_output[count:count+this_len, 
:, :, :].argmax(axis=-1)\n\t\t\t\t'''\n\t\t\t\tpdb.set_trace()\n\t\t\t\tfor z in range(this_len):\n\t\t\t\t\tplt.matshow(to[z])\n\t\t\t\t\tplt.colorbar()\n\t\t\t\t\tplt.show()\n\t\t\t\t'''\n\t\t\t\tcount += this_len\n\t\t\t\tprint(f'Batch: {j}')\n\t\t\t\tj += 1\n\t\t\texcept tf.errors.OutOfRangeError:\n\t\t\t\tbreak\n\t\treturn final_output", "def predict_by_feat(self,\n mask_preds: List[Tensor],\n results_list: InstanceList,\n batch_img_metas: List[dict],\n rescale: bool = True,\n **kwargs) -> InstanceList:\n assert len(mask_preds) == len(results_list) == len(batch_img_metas)\n\n for img_id in range(len(batch_img_metas)):\n img_meta = batch_img_metas[img_id]\n results = results_list[img_id]\n bboxes = results.bboxes\n mask_pred = mask_preds[img_id]\n if bboxes.shape[0] == 0 or mask_pred.shape[0] == 0:\n results_list[img_id] = empty_instances(\n [img_meta],\n bboxes.device,\n task_type='mask',\n instance_results=[results])[0]\n else:\n im_mask = self._predict_by_feat_single(\n mask_preds=mask_pred,\n bboxes=bboxes,\n img_meta=img_meta,\n rescale=rescale)\n results.masks = im_mask\n return results_list", "def predict(self, X):\n\n pred = []\n for x_i in X:\n tmp = x_i\n p0 = self.model.predict(tmp.reshape(1,128,128,3))\n p1 = self.model.predict(np.fliplr(tmp).reshape(1,128,128,3))\n# p2 = self.model.predict(np.flipud(tmp).reshape(1,128,128,1))\n# p3 = self.model.predict(np.fliplr(np.flipud(tmp)).reshape(1,128,128,1))\n p = (p0[0] +\n np.fliplr(p1[0]) #+\n# np.flipud(p2[0]) +\n# np.fliplr(np.flipud(p3[0]))\n ) / 2#4\n pred.append(p)\n return np.array(pred)", "def get_batch_predictions(rnn, X, target):\n\n out = rnn.forward(X)\n arr_preds = nn.functional.softmax(out, dim=-1).data.cpu().numpy()\n arr_target = target.detach().cpu().numpy()\n\n return arr_preds, arr_target", "def predict(self, x):\n assert isinstance(x, np.ndarray)\n \n output = x\n for layer in self._layers:\n output = layer.feed_forward(output) \n return output", "def predict(model, session_batch):\n predicted = np.zeros((len(session_batch), 4))\n for i, session in enumerate(session_batch): \n legal_moves = session.possible_moves(session.current_player())\n move_preds = get_move_predictions(model, legal_moves, session)\n\n chosen_move_index = move_preds[:, 0].argmax()\n predicted[i, :] = move_preds[chosen_move_index, :]\n return predicted", "def predict_batch(self, states):\n\n s0 = np.concatenate(states[:,0]).reshape((self._batch_size, ) + self._state_shape[0]) \n # s1 = np.concatenate(states[:,1]).reshape((self._batch_size, self._state_shape[1]))\n s2 = np.array(states[:,2], dtype=np.float)\n\n return self._model.predict([s0,s2])", "def _process_batch(sess, original_images, semantic_predictions, image_names,\n image_heights, image_widths, image_id_offset, save_dir,\n raw_save_dir, train_id_to_eval_id=None):\n (original_images,\n semantic_predictions,\n image_names,\n image_heights,\n image_widths) = sess.run([original_images, semantic_predictions,\n image_names, image_heights, image_widths])\n\n num_image = semantic_predictions.shape[0]\n for i in range(num_image):\n image_height = np.squeeze(image_heights[i])\n image_width = np.squeeze(image_widths[i])\n original_image = np.squeeze(original_images[i])\n semantic_prediction = np.squeeze(semantic_predictions[i])\n crop_semantic_prediction = semantic_prediction[:image_height, :image_width]\n\n # Save image.\n save_annotation.save_annotation(\n original_image, save_dir, _IMAGE_FORMAT % (image_id_offset + i),\n add_colormap=False)\n\n # Save prediction.\n 
save_annotation.save_annotation(\n crop_semantic_prediction, save_dir,\n _PREDICTION_FORMAT % (image_id_offset + i), add_colormap=True,\n colormap_type=FLAGS.colormap_type)\n\n if FLAGS.also_save_raw_predictions:\n image_filename = os.path.basename(image_names[i])\n\n if train_id_to_eval_id is not None:\n crop_semantic_prediction = _convert_train_id_to_eval_id(\n crop_semantic_prediction,\n train_id_to_eval_id)\n save_annotation.save_annotation(\n crop_semantic_prediction, raw_save_dir, image_filename,\n add_colormap=False)", "def loss_and_predict(self,\n feats_dict: Dict,\n batch_data_samples: SampleList,\n proposal_cfg: Optional[dict] = None,\n **kwargs) -> Tuple[dict, InstanceList]:\n batch_gt_instances_3d = []\n batch_gt_instances_ignore = []\n batch_input_metas = []\n for data_sample in batch_data_samples:\n batch_input_metas.append(data_sample.metainfo)\n batch_gt_instances_3d.append(data_sample.gt_instances_3d)\n batch_gt_instances_ignore.append(\n data_sample.get('ignored_instances', None))\n raw_points = feats_dict.pop('raw_points')\n bbox_preds, cls_preds = self(feats_dict)\n\n loss_inputs = (bbox_preds, cls_preds,\n raw_points) + (batch_gt_instances_3d, batch_input_metas,\n batch_gt_instances_ignore)\n losses = self.loss_by_feat(*loss_inputs)\n\n predictions = self.predict_by_feat(\n raw_points,\n bbox_preds,\n cls_preds,\n batch_input_metas=batch_input_metas,\n cfg=proposal_cfg)\n feats_dict['points_cls_preds'] = cls_preds\n if predictions[0].bboxes_3d.tensor.isinf().any():\n print(predictions)\n return losses, predictions", "def predict(self, batch_inputs: dict,\n batch_data_samples: SampleList) -> SampleList:\n pass", "def predict(model, X_test, batch_size):\r\n # test\r\n predict = model.predict(X_test, batch_size=batch_size, verbose=1)\r\n \r\n # if the model return result for every time, get only last time\r\n if predict.ndim == 3:\r\n extract = []\r\n for i in range(len(X_test)):\r\n index = np.arange(len(X_test[i]))\r\n if len(index[np.any(X_test[i] != 0, axis=1)]) == 0:\r\n extract.append(predict[i, -1, :])\r\n else:\r\n extract.append(predict[i, index[np.any(X_test[i] != 0.0, axis=1)][-1], :])\r\n\r\n # extract = np.array([predict[i,len(X_test[i])-1,:] for i in range(len(X_test))])\r\n return np.array(extract)\r\n else:\r\n return predict", "def predict(self, X):\n\n pred = []\n for x_i in X:\n tmp = x_i\n p0 = self.model.predict(tmp.reshape(1,128,128,1))\n p1 = self.model.predict(np.fliplr(tmp).reshape(1,128,128,1))\n# p2 = self.model.predict(np.flipud(tmp).reshape(1,128,128,1))\n# p3 = self.model.predict(np.fliplr(np.flipud(tmp)).reshape(1,128,128,1))\n p = (p0[0] +\n np.fliplr(p1[0]) #+\n# np.flipud(p2[0]) +\n# np.fliplr(np.flipud(p3[0]))\n ) / 2#4\n pred.append(p)\n return np.array(pred)", "def class_predict(trained_model, X_test, y_test, image_name):\n if MODEL == 1:\n return class_predict_3(trained_model, X_test, y_test, image_name)\n elif MODEL == 3:\n return class_predict_3(trained_model, X_test, y_test, image_name)\n elif MODEL == 2:\n return class_predict_2(trained_model, X_test, y_test)\n else:\n # For models 4, 5 and 6\n return class_predict_3(trained_model, X_test, y_test, image_name)", "def BatchPredict(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def after_pred(self):\n # If training then skip\n if self.training:\n return\n\n # Get ground truths in epoch 0 i.e. 
start of training\n if self.epoch == 0:\n self.y_true.extend(self.y.cpu().flatten().numpy())\n\n # Get predictions from each batch and add them to prediction container\n y_pred = self.pred.detach().cpu()\n \n self.y_pred.extend(y_pred.flatten().numpy())", "def predict(self, predPoints=None):", "def predict_batch(self, states):\n #states is a batch with shape (#samples, sequence length, 3): [conv shape, phase shape, elapsed shape] in each row\n \n # print(\"in predict_batch, shape of states: \", states.shape)\n # print(\"in predict batch, shape of entries: \", states[:,0].shape, states[:,1].shape)\n \n # print(\"in predict batch, shape of states 1: \", states[:,1])\n \n \n \n s0 = np.concatenate(np.concatenate(states[:,:,0])).reshape((self._batch_size, self._sequence_length) + self._state_shape[0])\n # print(\"shapes after reshaping: s0: \", s0.shape, s0.dtype )\n\n # s1 = np.concatenate(np.concatenate(states[:,:,1])).reshape((self._batch_size, self._sequence_length, self._state_shape[1]))\n # print(\"shapes after reshaping: s1: \", s1.shape, s1.dtype)\n \n s2 = np.expand_dims(np.array(states[:,:,2], dtype=np.float), axis = 2)\n # print(\"shapes after reshaping: s2: \", s2.shape, s2.dtype)\n\n return self._model.predict([s0,s2])", "def predict(self):\n self.kf.predict()\n self.nb_kf_pred += 1\n if self.time_since_update > 0:\n self.hit_streak = 0\n self.time_since_update += 1\n self.history.append(self.kf.x[:2].reshape(-1))\n return self.history[-1]", "def predict(self, inputs):\n num_anchors_per_loc = self._params.get(\"num_scales\") * len(self._params.get(\"aspect_ratios\"))\n prediction_dict = retinanet(inputs, self._num_classes, num_anchors_per_loc, is_training=self._is_training)\n # generate anchors\n feature_map_shape_list = self._get_feature_map_shape(prediction_dict[\"feature_map_list\"])\n image_shape = shape_utils.combined_static_and_dynamic_shape(inputs)\n # initialize anchor generator\n if self._anchor_generator is None:\n self._anchor_generator = Anchor(feature_map_shape_list=feature_map_shape_list,\n img_size=(image_shape[1], image_shape[2]),\n anchor_scale=self._params.get(\"anchor_scale\"),\n aspect_ratios=self._params.get(\"aspect_ratios\"),\n scales_per_octave=self._params.get(\"num_scales\"))\n self._anchors = self._anchor_generator.boxes\n prediction_dict[\"inputs\"] = inputs\n prediction_dict[\"anchors\"] = self._anchors\n return prqediction_dict", "def predict(self, X: List[np.ndarray], **kwargs) -> List[np.ndarray]:", "def predict(self, instances, stats=None, **kwargs):\n\n stats = stats or Stats()\n self._validate_kwargs(kwargs)\n\n with stats.time(PREPROCESS_TIME):\n preprocessed = self.preprocess(instances, stats=stats, **kwargs)\n with stats.time(ENGINE_RUN_TIME):\n predicted_outputs = self._client.predict(\n preprocessed, stats=stats, **kwargs)\n with stats.time(POSTPROCESS_TIME):\n postprocessed = self.postprocess(\n predicted_outputs, original_input=instances, stats=stats, **kwargs)\n return postprocessed", "def predict(self, sess, img_data):\n\n with sess.as_default():\n new_image = self.preprocess(img_data, self.input_shape)\n input_feed = self.create_input_feed(sess, new_image, img_data)\n output_fetch = self.create_output_fetch(sess)\n all_classes, all_scores, all_bboxes = sess.run(output_fetch, input_feed)\n\n return all_classes, all_scores, all_bboxes", "def predict_on_batch(self, X):\n len_unpadded = len(X)\n if self.pad_batches:\n X = pad_features(self.batch_size, X)\n\n if not self._restored_model:\n self.restore()\n with 
self.eval_graph.graph.as_default():\n\n # run eval data through the model\n n_tasks = self.n_tasks\n output = []\n with self._get_shared_session(train=False).as_default():\n feed_dict = self.construct_feed_dict(X)\n data = self._get_shared_session(train=False).run(\n self.eval_graph.output, feed_dict=feed_dict)\n batch_output = np.asarray(data[:n_tasks], dtype=float)\n # reshape to batch_size x n_tasks x ...\n if batch_output.ndim == 3:\n batch_output = batch_output.transpose((1, 0, 2))\n elif batch_output.ndim == 2:\n batch_output = batch_output.transpose((1, 0))\n else:\n raise ValueError('Unrecognized rank combination for output: %s' %\n (batch_output.shape,))\n output.append(batch_output)\n\n outputs = np.array(\n from_one_hot(np.squeeze(np.concatenate(output)), axis=-1))\n\n outputs = np.copy(outputs)\n outputs = np.reshape(outputs, (len(X), n_tasks))\n outputs = outputs[:len_unpadded]\n return outputs", "def predict_on_batch(self, X):\n len_unpadded = len(X)\n if self.pad_batches:\n X = pad_features(self.batch_size, X)\n\n if not self._restored_model:\n self.restore()\n with self.eval_graph.graph.as_default():\n\n # run eval data through the model\n n_tasks = self.n_tasks\n outputs = []\n with self._get_shared_session(train=False).as_default():\n n_samples = len(X)\n feed_dict = self.construct_feed_dict(X)\n data = self._get_shared_session(train=False).run(\n self.eval_graph.output, feed_dict=feed_dict)\n batch_outputs = np.asarray(data[:n_tasks], dtype=float)\n # reshape to batch_size x n_tasks x ...\n if batch_outputs.ndim == 3:\n batch_outputs = batch_outputs.transpose((1, 0, 2))\n elif batch_outputs.ndim == 2:\n batch_outputs = batch_outputs.transpose((1, 0))\n # Handle edge case when batch-size is 1.\n elif batch_outputs.ndim == 1:\n n_samples = len(X)\n batch_outputs = batch_outputs.reshape((n_samples, n_tasks))\n else:\n raise ValueError('Unrecognized rank combination for output: %s' %\n (batch_outputs.shape))\n # Prune away any padding that was added\n batch_outputs = batch_outputs[:n_samples]\n outputs.append(batch_outputs)\n\n outputs = np.squeeze(np.concatenate(outputs))\n\n outputs = np.copy(outputs)\n\n # Handle case of 0-dimensional scalar output\n if len(outputs.shape) > 0:\n return outputs[:len_unpadded]\n else:\n outputs = np.reshape(outputs, (1,))\n return outputs", "def predict(self, samples):\n output = []\n samples, _ = u.to_augmented_array(samples)\n for sample in samples:\n output.append(self._feedforward(sample))\n return output", "def predict(self, sentences, batch_size=128):\n pred_tensors = [\n sentence_to_tensor(sentence, self.src_lang)\n for sentence in tqdm(sentences, desc=\"creating prediction tensors\")\n ]\n\n collate_fn = Collater(self.src_lang, predict=True)\n pred_dataloader = DataLoader(\n SimpleDataset(pred_tensors),\n batch_size=batch_size,\n collate_fn=collate_fn,\n )\n\n sentences = []\n words = []\n attention = []\n for batch in tqdm(pred_dataloader, desc=\"predict batch num\"):\n preds = self.predict_batch(batch.to(device))\n pred_sentences, pred_words, pred_attention = preds\n sentences.extend(pred_sentences)\n words.extend(pred_words)\n attention.extend(pred_attention)\n\n # sentences = [num pred sentences]\n # words = [num pred sentences, trg len]\n # attention = [num pred sentences, n heads, trg len, src len]\n\n return sentences, words, attention", "def singlePrediction(self,img):\n self.optimizer = SGD(lr = 0,momentum=0,decay = 0)\n self.createModel()\n output = self.model.predict(np.expand_dims(img,axis = 0))\n return output", "def 
evaluate(predict_var, x_unlabeled, inputs, batch_sizes):\n x = x_unlabeled\n\n # calculate batches for predict loop\n unlabeled_batch_size = batch_sizes.get(\"Embedding\", 0)\n batch_size = min(len(x[0]), unlabeled_batch_size)\n batches = make_batches(len(x[0]), batch_size)\n\n y_preds = []\n # predict over all points\n for j, (batch_start, batch_end) in enumerate(batches):\n feed_dict = {K.learning_phase(): 0}\n # feed corresponding input for each input_type\n for input_type, input_placeholder in inputs.items():\n if input_type == \"Embedding\":\n for i in range(len(input_placeholder)):\n feed_dict[input_placeholder[i]] = x[i][batch_start:batch_end]\n elif input_type == \"Orthogonal\":\n batch_ids = np.random.choice(\n len(x), size=min(len(x), batch_sizes[input_type]), replace=False\n )\n for i in range(len(input_placeholder)):\n feed_dict[input_placeholder[i]] = x[i][batch_ids]\n else:\n raise Exception(\"Unrecognized feed name ['{}']\".format(input_type))\n # evaluate the batch\n y_pred_batch = np.asarray(K.get_session().run(predict_var, feed_dict=feed_dict))\n y_preds.append(y_pred_batch)\n\n if len(y_preds[0].shape):\n return np.concatenate(y_preds)\n else:\n return np.sum(y_preds)", "def predict(model, y0, h0, Npred):\n if y0.ndim == 1:\n aug_len = y0.shape[0] + 1\n elif y0.ndim == 2:\n aug_len = y0.shape[0] * y0.shape[1] + 1\n else:\n raise ValueError(\"'y0' must either be a vector or a matrix.\")\n\n (map_ih,(Whh,shape),bh,Who) = model\n def _step(input, xs):\n (y,h_augmented) = input\n h = h_augmented[aug_len:]\n #h = jnp.tanh(sp_dot(Whh, h, shape[0]) + map_ih(y) + bh)\n h = jnp.tanh(sp_dot(Whh, h, shape[0]) + map_ih(y))\n h = jnp.hstack([[1.], y.reshape(-1), h])\n y = Who.dot(h).reshape(y.shape)\n return ((y,h), (y,h))\n\n xs = jnp.arange(Npred) # necessary for lax.scan\n ((y,h), (ys,hs)) = lax.scan(_step, (y0,h0), xs)\n return ((y,h), (ys,hs))", "def run(self, input):\n\n with torch.no_grad():\n input_tensor = self.preprocess_input(input)\n p_labels, p_probs = self.predict_on_batch(input_tensor)\n predictions = np.stack((p_labels, p_probs), axis=1)\n\n return predictions", "def predict(self, X):\n\n\t\tn_samples = X.shape[0]\n\t\tpredicted = np.zeros(n_samples)\n\n\t\tfor i in xrange(n_samples):\n\t\t\tpredicted[i] = self.classify_example(X[i])\n\n\t\treturn predicted", "def predict(self, x_test, y_test, model_path):\n tf.reset_default_graph()\n with tf.compat.v1.Session() as sess:\n saver = tf.compat.v1.train.import_meta_graph(model_path + \".meta\")\n saver.restore(sess, model_path)\n graph = tf.compat.v1.get_default_graph()\n x = graph.get_operation_by_name(\"x_input\").outputs[0]\n y = tf.compat.v1.get_collection(\"network_architecture\")[0]\n no_samples = x_test.shape[0]\n predictions = []\n n_iteration = no_samples // self.batch_size\n for step in range(n_iteration):\n x_batch, y_batch = get_batch_data(x_test, y_test, iter_step=step, batch_size=self.batch_size)\n preds = sess.run(y, feed_dict={x: x_batch})\n predictions.append(preds)\n return predictions", "def predict(self, X, pred_batch_size=None):\n raw_output = self.decision_function(X, pred_batch_size)\n predictions = (raw_output > 0).astype(int)\n return predictions", "def predict(self):\n self.canv.update()\n ps = self.canv.postscript(colormode='mono')\n img = Image.open(io.BytesIO(ps.encode('utf-8')))\n img.save('result.png')\n x = Predict.transform_image(self)\n \n #prediction with multivariate regression\n Y_hat_test = self.multivariate_model.predict([x])\n C_multivariate = map(np.argmax, Y_hat_test) # 
classification vector\n C_multivariate = list(C_multivariate)\n multivariate_predict = C_multivariate[0]\n\n \n #prediction with Linear Discriminant Analysis (LDA)\n lda_predict = self.lda_model.predict([x])[0]\n qda_predict = self.qda_model.predict([x])[0]\n log_predict = self.log_model.predict([x])[0]\n \n baseline_label = Label(self, text='Baseline: ' + str(multivariate_predict) )\n baseline_label.grid(row=0, column=1, padx=5, pady=5)\n lda_label = Label(self, text=' LDA: '+ str(lda_predict))\n lda_label.grid(row=0, column=2, padx=5, pady=5)\n qda_label = Label(self, text='QDA: '+ str(qda_predict))\n qda_label.grid(row=1, column=1, padx=5, pady=5)\n log_label = Label(self, text=' Logistic: '+str(log_predict))\n log_label.grid(row=1, column=2, padx=5, pady=5)", "def predict(image_path):\n img = image.load_img(image_path, target_size=image_size)\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n predictions = model.predict(x)\n plt.imshow(img)\n print('Predicted:', decode_predictions(predictions, top=1)[0])\n return decode_predictions(predictions, top=1)[0]", "def base_semantic_head__predict(self, x, batch_img_metas, rescale=False):\n seg_preds = self.forward(x)['seg_preds']\n img_shape = batch_img_metas[0]['batch_input_shape']\n seg_preds = F.interpolate(\n seg_preds,\n size=(img_shape[0], img_shape[1]),\n mode='bilinear',\n align_corners=False)\n return seg_preds", "def predict(self, x):\n return self.model.predict(x, batch_size=1, verbose=0)", "def fit_predict(self):\n self.classifier = self.model\n self.classifier.fit(self.X_sample, self.y_sample)\n self.y_pred = self.classifier.predict(self.X_test)", "def predict_data(img): \n return gennet.predict_data(img, 'Resnet50')", "def predict_one_step(X, model_package):\n model = model_package[\"model\"]\n scaler = model_package[\"scaler\"]\n yhat = model.predict(X)\n return yhat, scaler.inverse_transform(yhat)", "def predict(self, dataset, batch_size):\n self.eval()\n yhat = torch.Tensor().to(self.device)\n with torch.no_grad():\n for i in tqdm(range(0,len(dataset.X),batch_size)):\n batch_X = dataset.X[i:i+batch_size].view(-1,1,self.input_rows,self.input_cols).to(self.device)\n batch_y = dataset.y[i:i+batch_size].to(self.device)\n\n batch_ls_embed = dataset.ls[i:i+batch_size].to(self.device) if self.type == \"listener\" or self.type == \"both\" else None\n batch_sp_embed = dataset.sp[i:i+batch_size].to(self.device) if self.type == \"speaker\" or self.type == \"both\" else None\n\n outputs = self(batch_X, batch_ls_embed, batch_sp_embed)\n\n yhat = torch.cat((yhat, outputs), 0)\n\n\n\n yf = dataset.y[:, 1]\n yhatf = torch.argmax(yhat, 1).cpu()\n stats = precision_recall_fscore_support(yf, yhatf)\n\n tp = 0\n tn = 0\n fn = 0\n fp = 0\n for i, j in zip(yhat, dataset.y):\n if torch.argmax(i) == torch.argmax(j):\n if j.data.numpy()[0] == 1: # positive instance\n tp += 1\n else:\n tn += 1\n else:\n if j.data.numpy()[0] == 1:\n fn += 1\n else:\n fp += 1\n acc = (tp + tn) / (tp + tn + fp + fn)\n\n print(f\"Accuracy: {round(acc*100,4)}\")\n print(f\"Confusion: TP: {tp}, FP: {fp}, FN: {fn}, TN: {tn}\")\n\n print(f\"Precision BC: {round(stats[0][0]*100,4)}\")\n print(f\"Precision NO BC: {round(stats[0][1]*100,4)}\")\n print(f\"Recall BC: {round(stats[1][0]*100,4)}\")\n print(f\"Recall No BC: {round(stats[1][1]*100,4)}\")\n print(f\"F-score BC: {round(stats[2][0]*100,4)}\")\n print(f\"F-score No BC: {round(stats[2][1]*100,4)}\")", "def _predict(X, estimators, vectorize_times, predict_method):\n from scipy import 
stats\n from sklearn.base import is_classifier\n # Initialize results:\n\n orig_shape = X.shape\n n_epochs = orig_shape[0]\n n_times = orig_shape[-1]\n\n n_clf = len(estimators)\n\n # in simple case, we are predicting each time sample as if it\n # was a different epoch\n if vectorize_times: # treat times as trials for optimization\n X = np.hstack(X).T # XXX JRK: still 17% of cpu time\n n_epochs_tmp = len(X)\n\n # Compute prediction for each sub-estimator (i.e. per fold)\n # if independent, estimators = all folds\n for fold, clf in enumerate(estimators):\n _y_pred = getattr(clf, predict_method)(X)\n # See inconsistency in dimensionality: scikit-learn/scikit-learn#5058\n if _y_pred.ndim == 1:\n _y_pred = _y_pred[:, None]\n # initialize predict_results array\n if fold == 0:\n predict_size = _y_pred.shape[1]\n y_pred = np.ones((n_epochs_tmp, predict_size, n_clf))\n y_pred[:, :, fold] = _y_pred\n\n # Bagging: Collapse y_pred across folds if necessary (i.e. if independent)\n # XXX need API to identify how multiple predictions can be combined?\n if fold > 0:\n if is_classifier(clf) and (predict_method == 'predict'):\n y_pred, _ = stats.mode(y_pred, axis=2)\n else:\n y_pred = np.mean(y_pred, axis=2, keepdims=True)\n y_pred = y_pred[:, :, 0]\n # Format shape\n if vectorize_times:\n shape = [n_epochs, n_times, y_pred.shape[-1]]\n y_pred = y_pred.reshape(shape).transpose([1, 0, 2])\n return y_pred", "def predict(self, input):\n input = input.reshape((input.shape[0], 1))\n return self.feedforward(input)", "def predict(self,\n feats: Tuple[Tensor],\n batch_data_samples: OptSampleList,\n test_cfg: ConfigType = {}) -> Predictions:\n\n batch_coords = self.forward(feats) # (B, K, D)\n\n # Restore global position with target_root\n target_root = batch_data_samples[0].metainfo.get('target_root', None)\n if target_root is not None:\n target_root = torch.stack([\n torch.from_numpy(b.metainfo['target_root'])\n for b in batch_data_samples\n ])\n else:\n target_root = torch.stack([\n torch.empty((0), dtype=torch.float32)\n for _ in batch_data_samples[0].metainfo\n ])\n\n preds = self.decode((batch_coords, target_root))\n\n return preds" ]
[ "0.7239248", "0.7074474", "0.69302636", "0.68597704", "0.6841375", "0.6764959", "0.6649414", "0.6614922", "0.6554689", "0.6516499", "0.64637536", "0.64612246", "0.6373398", "0.63642263", "0.63606596", "0.63237506", "0.6289094", "0.62845325", "0.6273538", "0.62653995", "0.6237582", "0.618222", "0.61600596", "0.61600596", "0.6155271", "0.6144221", "0.6119147", "0.6114651", "0.61134136", "0.61037475", "0.6099846", "0.6099802", "0.60915905", "0.6060399", "0.6052373", "0.6051503", "0.6050634", "0.604184", "0.60402465", "0.6033783", "0.60092145", "0.60023975", "0.59992516", "0.5996891", "0.59934694", "0.598856", "0.59835386", "0.59821665", "0.5975081", "0.5969121", "0.59688234", "0.5965374", "0.59576976", "0.5949043", "0.594113", "0.59331304", "0.5927895", "0.59252036", "0.5923605", "0.59163046", "0.59093964", "0.5908818", "0.5906847", "0.5902236", "0.58985496", "0.58965135", "0.5892217", "0.58815295", "0.58794713", "0.58761936", "0.58736193", "0.5871639", "0.58694786", "0.5854639", "0.58517224", "0.5843771", "0.5836687", "0.5829668", "0.5824475", "0.5820395", "0.5819888", "0.58189076", "0.58186966", "0.581333", "0.58131754", "0.5811645", "0.5808164", "0.58050466", "0.5800095", "0.57944775", "0.578117", "0.5779918", "0.57768214", "0.5759396", "0.57589495", "0.575634", "0.5753835", "0.57516414", "0.57428294", "0.5740171" ]
0.8082307
0
Unstack batch dimension and split into channels and alpha mask.
def unstack_and_split(self, x, batch_size, num_channels=3): unstacked = torch.reshape(x, [batch_size, -1] + list(x.shape)[1:]) channels, masks = torch.split(unstacked, [num_channels, 1], dim=2) return channels, masks
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _reshape_channels(x):\n assert x.dim() == 4\n batch_size, nc, h, w = x.size()\n x_t = x.view(batch_size, nc, -1).transpose(1, 2).contiguous()\n x_t = x_t.view(batch_size, h, w, nc)\n return x_t", "def batch_collate_fn(batch):\n images = []\n masks = []\n \n for (image, trimap, mask) in batch:\n mask = mask.unsqueeze(0)\n trimap = trimap.unsqueeze(0)\n image = torch.cat([image, trimap], 0).unsqueeze(0)\n \n images.append(image)\n masks.append(mask)\n\n images = torch.cat(images, 0)\n masks = torch.cat(masks, 0)\n\n return (images, masks)", "def test_unstack2():\n x = np.arange(1, 25).reshape((4, 2, 3)).astype(np.float32)\n axis = 2\n x_tensor = paddle.to_tensor(x)\n out_list = paddle.unstack(x_tensor, axis=axis)\n length = len(out_list)\n for i in range(length):\n ept = x[:, :, i]\n npt.assert_allclose(out_list[i].numpy(), ept)", "def test_unstack1():\n x = np.arange(1, 25).reshape((4, 2, 3)).astype(np.float32)\n axis = -1\n x_tensor = paddle.to_tensor(x)\n out_list = paddle.unstack(x_tensor, axis=axis)\n length = len(out_list)\n for i in range(length):\n ept = x[:, :, i]\n npt.assert_allclose(out_list[i].numpy(), ept)", "def test_unstack():\n x = np.arange(1, 13).reshape((3, 2, 2)).astype(np.int32)\n axis = 0\n x_tensor = paddle.to_tensor(x)\n out_list = paddle.unstack(x_tensor, axis=axis)\n length = len(out_list)\n for i in range(length):\n ept = x[i, :, :]\n npt.assert_allclose(out_list[i].numpy(), ept)", "def flatten_image(x):\n *batch_shape, h, w, c = x.shape\n return x.reshape((*batch_shape, h * w * c))", "def test_unstack3():\n x = np.arange(1, 49).reshape((4, 3, 2, 2)).astype(np.float64)\n axis = 1\n x_tensor = paddle.to_tensor(x)\n out_list = paddle.unstack(x_tensor, axis=axis)\n length = len(out_list)\n for i in range(length):\n ept = x[:, i, :, :]\n npt.assert_allclose(out_list[i].numpy(), ept)", "def _reshape(self, data):\n batch_size, height, width, n_channels = data.shape\n if self._grid_height:\n grid_height = self._grid_height\n else:\n grid_height = int(math.floor(math.sqrt(batch_size)))\n\n grid_width = int(math.ceil(batch_size/grid_height))\n\n if n_channels == 1:\n data = np.tile(data, (1, 1, 1, 3))\n n_channels = 3\n\n if n_channels != 3:\n raise ValueError('Image batch must have either 1 or 3 channels, but '\n 'was {}'.format(n_channels))\n\n shape = (height * grid_height, width * grid_width, n_channels)\n buf = np.full(shape, 255, dtype=np.uint8)\n multiplier = 1 if data.dtype in (np.int32, np.int64) else 255\n\n for k in range(batch_size):\n i = k // grid_width\n j = k % grid_width\n arr = data[k]\n x, y = i * height, j * width\n buf[x:x + height, y:y + width, :] = np.clip(\n multiplier * arr, 0, 255).astype(np.uint8)\n\n if self._zoom > 1:\n buf = buf.repeat(self._zoom, axis=0).repeat(self._zoom, axis=1)\n return buf", "def detection_collate(batch):\n targets = []\n imgs = []\n masks = []\n num_crowds = []\n\n for sample in batch:\n imgs.append(sample[0])\n targets.append(torch.FloatTensor(sample[1][0]))\n masks.append(torch.FloatTensor(sample[1][1]))\n num_crowds.append(sample[1][2])\n\n return imgs, (targets, masks, num_crowds)", "def make_grid(batch_img: torch.Tensor,\n batch_mask: torch.Tensor,\n img_denormalize_fn: Callable,\n mask_palette: Optional[Sequence] = default_palette,\n batch_gt_mask: Optional[torch.Tensor] = None):\n assert isinstance(batch_img, torch.Tensor) and isinstance(batch_mask, torch.Tensor)\n assert len(batch_img) == len(batch_mask)\n\n if batch_gt_mask is not None:\n assert isinstance(batch_gt_mask, torch.Tensor)\n assert 
len(batch_mask) == len(batch_gt_mask)\n\n b = batch_img.shape[0]\n h, w = batch_img.shape[2:]\n\n le = 3 if batch_gt_mask is None else 3 + 2\n out_image = np.zeros((h * le, w * b, 3), dtype='uint8')\n\n for i in range(b):\n img = batch_img[i]\n mask = batch_mask[i]\n\n img = img_denormalize_fn(img)\n img = tensor_to_numpy(img)\n img = render_image(img)\n mask = mask.cpu().numpy()\n mask = render_mask(mask, mask_palette)\n\n out_image[0:h, i * w:(i + 1) * w, :] = img\n out_image[1 * h:2 * h, i * w:(i + 1) * w, :] = render_datapoint(img,\n mask,\n blend_alpha=0.4)\n out_image[2 * h:3 * h, i * w:(i + 1) * w, :] = mask\n\n if batch_gt_mask is not None:\n gt_mask = batch_gt_mask[i]\n gt_mask = gt_mask.cpu().numpy()\n gt_mask = render_mask(gt_mask, mask_palette)\n out_image[3 * h:4 * h, i * w:(i + 1) * w, :] = render_datapoint(img,\n gt_mask,\n blend_alpha=0.4)\n out_image[4 * h:5 * h, i * w:(i + 1) * w, :] = gt_mask\n\n return out_image", "def cutmix(batch: Tuple[torch.Tensor, torch.Tensor], alpha: float = 1.0) -> Tuple:\n data, targets = batch\n indices = torch.randperm(data.size(0))\n shuffled_data = data[indices]\n shuffled_targets = targets[indices]\n lam = np.random.beta(alpha, alpha) if alpha > 0 else 1\n\n x0, x1, y0, y1 = random_bbox(data, lam)\n\n data[:, :, y0:y1, x0:x1] = shuffled_data[:, :, y0:y1, x0:x1]\n\n targets = (targets, shuffled_targets, lam)\n\n return data, targets", "def unstack_batch(tensor_dict):\n # # extract tensor from tuple. TODO: figure out where box tuple comes from?\n for key in tensor_dict.keys():\n if key == \"gt_boxes\":\n tensor_dict[\"gt_boxes\"] = tensor_dict[\"gt_boxes\"][0]\n unbatched_tensor_dict = {key: tf.unstack(tensor) for key, tensor in tensor_dict.items()}\n # remove padding along 'num_boxes' dimension of the gt tensors\n num_gt_list = unbatched_tensor_dict[\"num_gt_boxes\"]\n unbatched_unpadded_tensor_dict = {}\n for key in unbatched_tensor_dict:\n if key == \"num_gt_boxes\":\n continue\n unpadded_tensor_list = []\n for num_gt, padded_tensor in zip(num_gt_list, unbatched_tensor_dict[key]):\n tensor_shape = shape_utils.combined_static_and_dynamic_shape(padded_tensor)\n slice_begin = tf.zeros(len(tensor_shape), dtype=tf.int32)\n slice_size = tf.stack([num_gt] + [-1 if dim is None else dim for dim in tensor_shape[1:]])\n unpadded_tensor = tf.slice(padded_tensor, slice_begin, slice_size)\n unpadded_tensor_list.append(unpadded_tensor)\n unbatched_unpadded_tensor_dict[key] = unpadded_tensor_list\n return unbatched_unpadded_tensor_dict", "def _batchify(data: nd.NDArray, batch_size):\n # Work out how cleanly we can divide the dataset into bsz parts.\n nbatch = len(data) // batch_size\n # Trim off any extra elements that wouldn't cleanly fit (remainders).\n data = data[0: nbatch * batch_size]\n # Evenly divide the data across the bsz batches.\n data = data.reshape(batch_size, -1).transpose()\n # if torch.cuda.is_available():\n # data = data.cuda()\n return data", "def batch_to_space(stacked_patches, tiles, shape_padded_label, shape_image, channels, b_verbose=False):\n\n shape_stacked = tf.unstack(tf.shape(stacked_patches)) # something like [x, img_1, img_2, img_3, channel]\n stacked_patches = tf.reshape(stacked_patches, [tiles[0], tiles[1], tiles[2], *shape_stacked[1:]]) # split stacks into tiles\n stacked_patches = tf.transpose(stacked_patches, perm=[0, 3, 1, 4, 2, 5, 6]) # interleave tiles and img dims\n if b_verbose:\n stacked_patches = tf.Print(stacked_patches, [tf.shape(stacked_patches)], 'stacked_patches:', summarize=10)\n image = 
tf.reshape(stacked_patches, [-1, *shape_padded_label, channels]) # reshape into proper image\n if b_verbose:\n image = tf.Print(image, [tf.shape(image)], 'new_image:', summarize=10)\n\n # crop image to final size\n pos_begin = [int((a - b) / 2) for a, b in zip(shape_padded_label, shape_image)]\n image = tf.Print(image, [tf.shape(image), pos_begin, shape_image, channels], 'shapes before slicing', summarize=5)\n image = tf.slice(image,\n [0, *pos_begin, 0],\n [1, *shape_image, channels])\n\n return image", "def forward(self, x, alpha=1e-8):\r\n batch_size, _, height, width = x.shape\r\n\r\n # [B x C x H x W] Subtract mean over batch.\r\n y = x - x.mean(dim=0, keepdim=True)\r\n\r\n # [1 x C x H x W] Calc standard deviation over batch\r\n y = torch.sqrt(y.pow(2.).mean(dim=0, keepdim=False) + alpha)\r\n\r\n # [1] Take average over feature_maps and pixels.\r\n y = y.mean().view(1, 1, 1, 1)\r\n\r\n # [B x 1 x H x W] Replicate over group and pixels.\r\n y = y.repeat(batch_size, 1, height, width)\r\n\r\n # [B x C x H x W] Append as new feature_map.\r\n y = torch.cat([x, y], 1)\r\n\r\n # return the computed values:\r\n return y", "def _batchify(batch):\n im_name, im0, im1, im2, im3, im4, im5, im6, im7, im8, im9 = zip(*batch)\n im0 = nd.stack(*im0)\n im1 = nd.stack(*im1)\n im2 = nd.stack(*im2)\n im3 = nd.stack(*im3)\n im4 = nd.stack(*im4)\n im5 = nd.stack(*im5)\n im6 = nd.stack(*im6)\n im7 = nd.stack(*im7)\n im8 = nd.stack(*im8)\n im9 = nd.stack(*im9)\n return im_name, im0, im1, im2, im3, im4, im5, im6, im7, im8, im9", "def collate_fn(self, batch):\n images = list()\n targets = list()\n\n for b in batch:\n images.append(b[0])\n targets.append(b[1])\n\n # images = torch.stack(images, dim=0)\n\n return images, targets # tensor (N, 3, 300, 300), 3 lists of N tensors each", "def combined_masks(action_mask,betsize_mask):\n if action_mask.dim() > 2:\n return torch.cat([action_mask[:,:,:-2],betsize_mask],dim=-1)\n elif action_mask.dim() > 1:\n return torch.cat([action_mask[:,:-2],betsize_mask],dim=-1)\n else:\n return torch.cat([action_mask[:-2],betsize_mask])", "def detection_collate(batch):\n targets = []\n imgs = []\n for sample in batch:\n imgs.append(sample[0])\n targets.append(torch.FloatTensor(sample[1]))\n return torch.stack(imgs, 0), targets", "def split_and_concat_model():\n x = tf.keras.Input(shape=[224, 224, 3, ])\n # TODO: implement split for the following commented out method of splitting\n # y1 = x[:, :100, :, :]\n # y2 = x[:, 101:, :, :]\n y1, y2 = tf.split(x, [100, 124], 1)\n y1 = tf.nn.relu(y1)\n y2 = tf.keras.layers.BatchNormalization()(y2)\n z = tf.keras.layers.concatenate([y1, y2], axis=1)\n z = tf.keras.layers.Flatten()(z)\n output = tf.keras.layers.Dense(10, activation=tf.nn.softmax, name=\"split_and_concat_model\")(z)\n return output", "def unbatch_stack(S, grid_shape):\n\tI, J = grid_shape\n\tC, M = S.shape[1], S.shape[2]\n\treturn S.reshape(-1, I, J, C, M, M)", "def collate_fn(self, batch):\n images = list()\n boxes = list()\n labels = list()\n difficulties = list()\n\n for b in batch:\n images.append(b[0])\n boxes.append(b[1])\n labels.append(b[2])\n difficulties.append(b[3])\n\n images = torch.stack(images, dim=0)\n\n return images, boxes, labels, difficulties # tensor (N, 3, 300, 300), 3 lists of N tensors each", "def split_3d_array_into_channels(arr):\n return [arr[:, :, i] for i in range(arr.shape[-1])]", "def forward_single(self, x: Tensor, batch_img_metas: List[dict]):\n img_h, img_w = batch_img_metas[0]['pad_shape'][:2]\n batch_size, _, feat_h, feat_w = x.shape\n 
downsample_ratio = img_h / feat_h\n\n for conv_cls_prev_layer in self.conv_cls_prev:\n cls_feat = conv_cls_prev_layer(x)\n out_cls = self.conv_cls(cls_feat)\n\n if self.use_edge_fusion:\n # calculate the edge indices for the batch data\n edge_indices_list = get_edge_indices(\n batch_img_metas, downsample_ratio, device=x.device)\n edge_lens = [\n edge_indices.shape[0] for edge_indices in edge_indices_list\n ]\n max_edge_len = max(edge_lens)\n edge_indices = x.new_zeros((batch_size, max_edge_len, 2),\n dtype=torch.long)\n for i in range(batch_size):\n edge_indices[i, :edge_lens[i]] = edge_indices_list[i]\n # cls feature map edge fusion\n out_cls = self.edge_fuse_cls(cls_feat, out_cls, edge_indices,\n edge_lens, feat_h, feat_w)\n\n bbox_pred = []\n\n for i in range(len(self.group_reg_dims)):\n reg_feat = x.clone()\n # feature regression head\n if len(self.reg_branch[i]) > 0:\n for conv_reg_prev_layer in self.conv_reg_prevs[i]:\n reg_feat = conv_reg_prev_layer(reg_feat)\n\n for j, conv_reg in enumerate(self.conv_regs[i]):\n out_reg = conv_reg(reg_feat)\n # Use Edge Fusion Module\n if self.use_edge_fusion and (i, j) in self.edge_fusion_inds:\n # reg feature map edge fusion\n out_reg = getattr(self, 'edge_fuse_reg_{}_{}'.format(\n i, j))(reg_feat, out_reg, edge_indices, edge_lens,\n feat_h, feat_w)\n bbox_pred.append(out_reg)\n\n bbox_pred = torch.cat(bbox_pred, dim=1)\n cls_score = out_cls.sigmoid() # turn to 0-1\n cls_score = cls_score.clamp(min=1e-4, max=1 - 1e-4)\n\n return cls_score, bbox_pred", "def tohost(x):\n n_device, n_batch, *remaining_dims = x.shape\n return x.reshape((n_device * n_batch,) + tuple(remaining_dims))", "def adapt_batch(batch):\n image_arrays, labellings = batch\n\n current_batch_size = len(labellings)\n\n images = np.array(image_arrays).reshape(current_batch_size, *image_arrays[0].shape)\n\n padded_labellings = pad_labellings(labellings)\n\n labels = np.array(padded_labellings, dtype=np.int32).reshape(current_batch_size, -1)\n\n input_lengths = compute_input_lengths(image_arrays)\n\n label_lengths = np.array([len(labelling) for labelling in labellings],\n dtype=np.int32).reshape(current_batch_size, 1)\n\n return [images, labels, input_lengths, label_lengths], labels", "def split_images(x, y=None, size=(128, 128), num_part=4):\n x_patches = image.PatchExtractor(patch_size=size, max_patches=num_part, random_state=0)\n x_imgs = x_patches.transform(x)\n # Check if number of channels is the same for grayscale\n if x.shape[-1] != x_imgs.shape[-1]:\n x_imgs = x_imgs[:, :, :, np.newaxis]\n\n if not y is None:\n y_patches = image.PatchExtractor(patch_size=size, max_patches=num_part, random_state=0)\n y_imgs = y_patches.transform(y)\n\n # Check if number of channels is the same for grayscale\n if y.shape[-1] != y_imgs.shape[-1]:\n y_imgs = y_imgs[:, :, :, np.newaxis]\n\n return x_imgs, y_imgs\n\n return x_imgs", "def get_channels(batch):\n y = batch[..., :1]\n uv = batch[..., 1:3]\n edge = batch[..., 3:]\n\n return y, uv, edge", "def unstack(a, axis=0):\n shape = a.shape\n return [jnp.squeeze(b, axis=axis) for b in \\\n jnp.split(a, shape[axis], axis=axis)]", "def _to_stack(self, values):\n if self.batch_size > 1:\n try:\n values = np.stack(values, axis=0)\n except Exception as identifier:\n for x in values:\n print(x.shape)\n _, ax = plt.subplots(1)\n ax.imshow(x[..., 0])\n ax.set_title('ERROR!')\n plt.show()\n print(identifier)\n raise(Exception)\n else:\n values = values[0][np.newaxis, ...]\n return values", "def batch_flatten(this,x):\n shape = x.get_shape().as_list()[1:]\n 
if None not in shape:\n return tf.reshape(x, [-1, int(np.prod(shape))])\n return tf.reshape(x, tf.stack([tf.shape(x)[0], -1]))", "def batch_flatten(this,x):\n shape = x.get_shape().as_list()[1:]\n if None not in shape:\n return tf.reshape(x, [-1, int(np.prod(shape))])\n return tf.reshape(x, tf.stack([tf.shape(x)[0], -1]))", "def detection_collate(batch):\n targets = []\n imgs = []\n inputs = {}\n ws = []\n hs = []\n im_ids = []\n scales = []\n crop_box = []\n for sample in batch:\n imgs.append(sample[0]['data'])\n targets.append(torch.FloatTensor(sample[1]))\n ws.append(sample[0]['width'])\n hs.append(sample[0]['height'])\n im_ids.append(sample[0]['image_id'])\n if 'scale' in sample[0]:\n scales.append(sample[0]['scale'])\n if 'crop_box' in sample[0]:\n crop_box.append(sample[0]['crop_box'])\n inputs['data'] = torch.stack(imgs, 0)\n inputs['width'] = ws\n inputs['height'] = hs\n inputs['image_id'] = im_ids\n inputs['scale'] = scales\n inputs['crop_box'] = crop_box\n return inputs, targets", "def _reshape(self, data):\n\n\t\td = np.zeros((32,32,3))\n\t\td_r = data[0:1024].reshape(32,32)\n\t\td_g = data[1024:2048].reshape(32,32)\n\t\td_b = data[2048:].reshape(32,32)\n\n\t\tfor h in range(32):\n\t\t for w in range(32):\n\t\t for c in range(3):\n\n\t\t if c == 0 : d[h,w,c] = d_r[h,w]\n\t\t elif c == 1 : d[h,w,c] = d_g[h,w]\n\t\t else : d[h,w,c] = d_b[h,w]\n\n\t\tarray = np.array(d, dtype=np.uint8)\n\t\timg = Image.fromarray(array)\n\t\ttemp = img.resize(size = (64,64))\n\t\td = image.img_to_array(temp)\n\n\t\t#plt.imshow(d)\n\t\t#plt.show()\n\t\treturn d", "def pack_batch(label_encoder, batch, device=None):\n (word, char), tasks = label_encoder.transform(batch)\n\n word = torch_utils.pad_batch(word, label_encoder.word.get_pad(), device=device)\n char = torch_utils.pad_batch(char, label_encoder.char.get_pad(), device=device)\n\n output_tasks = {}\n for task, data in tasks.items():\n output_tasks[task] = torch_utils.pad_batch(\n data, label_encoder.tasks[task].get_pad(), device=device)\n\n return (word, char), output_tasks", "def flatten_layers(data):\n return data.reshape((data.shape[0], data.shape[1], -1))", "def collate(self, batch):\n \n images = []\n indices = []\n roi_size = 5 if self.Train else 4\n rois = torch.zeros((len(batch), 20, roi_size), dtype=torch.float32)\n rois = rois.to(batch[0][1].device)\n \n for _b in range(len(batch)):\n # Accumulate patches:\n images.append(batch[_b][0].to(torch.float32))\n indices.append(batch[_b][2])\n \n # Accumulate ROI:\n \"\"\"\n image_num = torch.Tensor([_b]).expand(batch[_b][1].size(0))\n image_num = image_num.type(batch[_b][1].dtype).view(-1,1)\n image_num = image_num.to(batch[_b][1].device)\n _roi = torch.cat([image_num, batch[_b][1]], dim=1)\n rois = torch.cat([rois, _roi], dim=0)\n \"\"\"\n num_boxes = batch[_b][1].size(0)\n rois[_b,:num_boxes,:] = batch[_b][1]\n \n \n # Stack outputs and return\n batch = [torch.stack(images, dim=0), rois, torch.Tensor(indices)]\n return batch", "def _reshape(self, data):\n\n\t\t\td = np.zeros((32,32,3))\n\t\t\td_r = data[0:1024].reshape(32,32)\n\t\t\td_g = data[1024:2048].reshape(32,32)\n\t\t\td_b = data[2048:].reshape(32,32)\n\n\t\t\tfor h in range(32):\n\t\t\t for w in range(32):\n\t\t\t for c in range(3):\n\n\t\t\t if c == 0 : d[h,w,c] = d_r[h,w]\n\t\t\t elif c == 1 : d[h,w,c] = d_g[h,w]\n\t\t\t else : d[h,w,c] = d_b[h,w]\n\n\t\t\tarray = np.array(d, dtype=np.uint8)\n\t\t\timg = Image.fromarray(array)\n\t\t\ttemp = img.resize(size = (64,64))\n\t\t\td = 
image.img_to_array(temp)\n\n\t\t\t#plt.imshow(d)\n\t\t\t#plt.show()\n\t\t\treturn d", "def detection_collate(batch):\n targets = []\n imgs = []\n length = []\n for sample in batch:\n imgs.append(sample[0])\n targets.append(sample[1])\n length.append(sample[2])\n return torch.stack(imgs, 0), torch.stack(targets, 0), torch.tensor(length)", "def pack_images(images, rows, cols):\n shape = tf.shape(images)\n width = shape[-3]\n height = shape[-2]\n depth = shape[-1]\n images = tf.reshape(images, (-1, width, height, depth))\n batch = tf.shape(images)[0]\n rows = tf.minimum(rows, batch)\n cols = tf.minimum(batch // rows, cols)\n images = images[:rows * cols]\n images = tf.reshape(images, (rows, cols, width, height, depth))\n images = tf.transpose(images, [0, 2, 1, 3, 4])\n images = tf.reshape(images, [1, rows * width, cols * height, depth])\n return images", "def _extract_bands_feature(self, eopatch: EOPatch, images: Iterable[np.ndarray], shape: tuple[int, ...]) -> None:\n processed_bands = []\n for band_info in self.requested_bands:\n tiffs = [tar[band_info.name + \".tif\"] for tar in images]\n dtype = self.bands_dtype or band_info.output_types[0]\n processed_bands.append(self._extract_array(tiffs, 0, shape, dtype))\n\n bands_feature = cast(Tuple[FeatureType, str], self.bands_feature) # verified by `if` in _extract_data\n eopatch[bands_feature] = np.concatenate(processed_bands, axis=-1)", "def process_data(self, spec):\n with torch.no_grad():\n \n assert(len(spec) == 5), 'dataloader should return (spec_masked, pos_enc, mask_label, attn_mask, spec_stacked)'\n # Unpack and Hack bucket: Bucketing should cause acoustic feature to have shape 1xBxTxD'\n spec_masked = spec[0].squeeze(0)\n pos_enc = spec[1].squeeze(0)\n mask_label = spec[2].squeeze(0)\n attn_mask = spec[3].squeeze(0)\n spec_stacked = spec[4].squeeze(0)\n\n spec_masked = spec_masked.to(device=self.device)\n pos_enc = torch.FloatTensor(pos_enc).to(device=self.device)\n mask_label = torch.ByteTensor(mask_label).to(device=self.device)\n attn_mask = torch.FloatTensor(attn_mask).to(device=self.device)\n spec_stacked = spec_stacked.to(device=self.device)\n\n return spec_masked, pos_enc, mask_label, attn_mask, spec_stacked # (x, pos_enc, mask_label, attention_mask. 
y)", "def _data_generation(self, batch_data):\n # Initialization\n batch_x = []\n batch_y = defaultdict(list)\n\n for ind, item_data in batch_data.iterrows():\n img_path = os.path.join(self.img_dir, \"images\", \"rgb\", item_data[\"name\"])\n img = cv2.imread(img_path)\n try:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n except Exception as error:\n print(img_path)\n print(error)\n not_valid_mask = self.read_masks_borders(item_data[\"name\"])\n img[not_valid_mask] = 0\n\n # getmasks\n targets = np.zeros((img.shape[0], img.shape[1], len(self.classes)))\n for i, c in enumerate(self.classes):\n mask_path = os.path.join(self.img_dir, \"labels\", c, item_data[\"name\"])\n mask = cv2.imread(\n mask_path.replace(\".jpg\", \".png\"), cv2.IMREAD_GRAYSCALE\n )\n mask[not_valid_mask[:, :, 0]] = 0\n mask = mask > 0\n targets[:, :, i] = mask\n\n res = self.reshape_func(image=img, mask=targets)\n img, targets = res['image'], res['mask']\n if self.do_aug:\n res = self.aug(image=img, mask=targets)\n img, targets = res['image'], res['mask']\n\n for i, c in enumerate(self.classes):\n batch_y[c].append(targets[:, :, i])\n\n batch_x.append(img)\n\n batch_x = np.array(batch_x, np.float32)\n batch_y = {k: np.array(v, np.float32) for k, v in batch_y.items()}\n batch_y = {k: np.expand_dims(v, axis=-1) for k, v in batch_y.items()}\n\n return (\n imagenet_utils.preprocess_input(batch_x, \"channels_last\", mode=\"tf\"),\n batch_y\n )", "def _collate_fn(batch):\n # imgs = [b[0] for b in batch]\n # labels = [b[1] for b in batch]\n # imgs = torch.stack(imgs, dim=0)\n # return [imgs, labels]\n imgs = [b[0] for b in batch]\n labels = [b[1] for b in batch]\n imgs = torch.cat(imgs, dim=0)\n labels = [l for sublist in labels for l in sublist]\n return [imgs, labels]", "def reshape(arr):\r\n reshape_arr = np.empty((3,240,320),dtype='float32')\r\n reshape_arr[0,:,:] = arr[:,:,0]\r\n reshape_arr[1,:,:] = arr[:,:,1]\r\n reshape_arr[2,:,:] = arr[:,:,2]\r\n return reshape_arr", "def _stack_images_masks_flair(self, pth, patient_id, img_cnt):\n\n img_stack, msk_stack = [], []\n img_file_name = \"{patient}_{id}.tif\"\n msk_file_name = \"{patient}_{id}_mask.tif\"\n for i in range(1, img_cnt + 1):\n img = cv2.imread(os.path.join(pth, img_file_name.format(patient=patient_id, id=i)))\n mask = cv2.imread(os.path.join(pth, msk_file_name.format(patient=patient_id, id=i)))\n img_stack.append(img)\n msk_stack.append(mask)\n img_stack, msk_stack = np.array(img_stack), np.array(msk_stack)\n return img_stack, msk_stack", "def pack_images(images, rows, cols):\n shape = tf.shape(input=images)\n width = shape[-3]\n height = shape[-2]\n depth = shape[-1]\n images = tf.reshape(images, (-1, width, height, depth))\n batch = tf.shape(input=images)[0]\n rows = tf.minimum(rows, batch)\n cols = tf.minimum(batch // rows, cols)\n images = images[:rows * cols]\n images = tf.reshape(images, (rows, cols, width, height, depth))\n images = tf.transpose(a=images, perm=[0, 2, 1, 3, 4])\n images = tf.reshape(images, [1, rows * width, cols * height, depth])\n return images", "def preprocess_image(self, batched_inputs):\n images = [x[\"image\"].to(self.device) for x in batched_inputs]\n images_aug = [x[\"image_color\"].to(self.device) for x in batched_inputs]\n\n images = [self.normalizer(x) for x in images]\n images_aug = [self.normalizer(x) for x in images_aug]\n\n images = ImageList.from_tensors(images,\n self.backbone.size_divisibility)\n images_aug = ImageList.from_tensors(images_aug,\n self.backbone.size_divisibility)\n return images, images_aug", "def 
_reshape_output_batch(self, number, output):\n #tt = cutotime('reshape')\n #tt.start()\n output = output.reshape(self.output_shapes[number]) # batch, h, w, 3, (5 + 80)\n #tt.stop()\n return output", "def forward(self, xs, ilens, masks):\n if isinstance(self.embed, Conv2dSubsampling):\n xs, masks = self.embed(xs, masks)\n else:\n xs = self.embed(xs)\n xs, _ = self.encoders(xs, masks)\n if self.normalize_before:\n xs = self.after_norm(xs)\n hlens = [xs.size(1) for i in range(xs.size(0))]\n return xs, hlens", "def _extract_array(tiffs: list[np.ndarray], idx: int, shape: tuple[int, ...], dtype: type | np.dtype) -> np.ndarray:\n feature_arrays = (np.atleast_3d(img)[..., idx] for img in tiffs)\n return np.asarray(list(feature_arrays), dtype=dtype).reshape(*shape, 1)", "def ensure_alpha_channel(img, alpha=1.0, dtype=np.float32, copy=False):\n img = im_core.ensure_float01(img, dtype=dtype, copy=copy)\n c = im_core.num_channels(img)\n if c == 4:\n return img\n else:\n if isinstance(alpha, np.ndarray):\n alpha_channel = alpha\n else:\n alpha_channel = np.full(img.shape[0:2], fill_value=alpha, dtype=img.dtype)\n if c == 3:\n return np.dstack([img, alpha_channel])\n elif c == 1:\n return np.dstack([img, img, img, alpha_channel])\n else:\n raise ValueError(\n 'Cannot ensure alpha. Input image has c={} channels'.format(c))", "def __call__(self, batch):\r\n '''\r\n for i in range(len(batch)):\r\n if batch[i].shape[1] != 861:\r\n batch[i] = batch[i - 1]\r\n '''\r\n return torch.tensor(batch)#torch.stack(batch, dim = 0)\r", "def detection_collate(batch):\n label_len = len(batch[0][2])\n target1 = []\n target2 = []\n if label_len == 3:\n target3 = []\n imgs1 = []\n imgs2 = []\n image_ids = []\n for sample in batch:\n imgs1.append(sample[0])\n imgs2.append(sample[1])\n target1.append(torch.FloatTensor(sample[2][0]))\n target2.append(torch.FloatTensor(sample[2][1]))\n image_ids.append(sample[3])\n if label_len == 3:\n target3.append(torch.FloatTensor(sample[2][2]))\n if label_len == 3:\n return [torch.stack(imgs1, 0), torch.stack(imgs2, 0), torch.stack(target1, 0), torch.stack(target2, 0),\n torch.stack(target3, 0), image_ids]\n else:\n return [torch.stack(imgs1, 0), torch.stack(imgs2, 0), torch.stack(target1, 0), torch.stack(target2, 0), image_ids]", "def _find_masks(batch, min_size=10):\n result = []\n for b in batch:\n assert b.shape[0] == 1\n patch = b[0]\n z_sum = patch.sum(axis=(1, 2))\n coords = np.where(z_sum > min_size)[0]\n if len(coords) > 0:\n ind = coords[len(coords) // 2]\n result.append(b[:, ind:ind + 1, ...])\n else:\n ind = b.shape[1] // 2\n result.append(b[:, ind:ind + 1, ...])\n\n return np.stack(result, axis=0)", "def vg_collate_fn(batch):\n # batch is a list, and each element is (image, objs, boxes, triples)\n all_imgs, all_objs, all_boxes, all_masks, all_obj_to_img = [], [], [], [], []\n # obj_offset = 0\n for i, (img, objs, boxes, masks) in enumerate(batch):\n all_imgs.append(img[None])\n O = objs.size(0)\n all_objs.append(objs)\n all_boxes.append(boxes)\n all_masks.append(masks)\n\n all_obj_to_img.append(torch.LongTensor(O).fill_(i))\n # obj_offset += O\n\n all_imgs = torch.cat(all_imgs)\n all_objs = torch.cat(all_objs)\n all_boxes = torch.cat(all_boxes)\n all_masks = torch.cat(all_masks)\n all_obj_to_img = torch.cat(all_obj_to_img)\n\n out = (all_imgs, all_objs, all_boxes, all_masks, all_obj_to_img)\n return out", "def _pad_reshape_mask_batch(batch, flat_batch_size, num_devices,\n num_batch_dims):\n batch = _add_mask(batch, num_batch_dims)\n\n def f(x):\n if num_batch_dims > 1:\n x = 
tf.reshape(x, tf.concat([[-1], x.shape[num_batch_dims:]], axis=0))\n actual_batch_size = tf.shape(x)[0]\n needed = flat_batch_size - actual_batch_size\n zeros = tf.zeros(tf.concat([[needed], x.shape[1:]], axis=0), dtype=x.dtype)\n new_x = tf.concat([x, zeros], axis=0)\n new_x = tf.reshape(new_x, tf.concat([[num_devices, -1], x.shape[1:]],\n axis=0))\n return new_x\n\n new_batch = {k: f(v) for k, v in batch.items()}\n return new_batch", "def _reshape_batch(inputs, size, batch_size):\n batch_inputs = []\n for length_id in range(size):\n batch_inputs.append(np.array([inputs[batch_id][length_id]\n for batch_id in range(batch_size)], dtype=np.int32))\n return batch_inputs", "def split_heads(self, x, batch_size):\n x = tf.reshape(x, (batch_size, -1, self.h, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])", "def _stack_batches(X):\n X = [Classifier._to_numpy(Xb) for Xb in X]\n if len(X[0].shape) == 1:\n return np.hstack(X)\n elif len(X[0].shape) == 2:\n return np.vstack(X)\n else:\n raise ValueError(f\"Can't stack {len(X[0].shape)}-dim batches.\")", "def _reshape(self, arr: np.ndarray) -> np.ndarray:\n return arr.reshape(self.TileHeight.value, self.TileWidth.value, self.bands,)", "def convert_unstack(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axis = op.attr(\"axis\")\n indices_or_sections = len(op.output(\"Y\"))\n outs = _op.split(x, indices_or_sections=indices_or_sections, axis=axis)\n for i, out in enumerate(outs):\n out = _op.squeeze(out, axis=axis)\n g.add_node(op.output(\"Y\")[i], out)", "def custom_collate_fn(batch):\n images, bboxes, context_indices, labels = zip(*batch)\n # images = (img_1, ..., img_N) each element of size [3, img_H, img_W]\n # bboxes = (bboxes_1, ..., bboxes_N) each element of size [n_bboxes_in_image, 4]\n # context_indices = (ci_1, ..., ci_N) each element of size [n_bboxes_in_image, 2*context_size]\n # labels = (labels_1, ..., labels_N) each element of size [n_bboxes_in_image]\n \n images = torch.stack(images, 0)\n \n bboxes_with_batch_index = []\n observed_bboxes = 0\n for i, bbox in enumerate(bboxes):\n batch_indices = torch.Tensor([i]*bbox.shape[0]).view(-1,1)\n bboxes_with_batch_index.append(torch.cat((batch_indices, bbox), dim=1))\n context_indices[i][context_indices[i] != -1] += observed_bboxes\n observed_bboxes += bbox.shape[0]\n bboxes_with_batch_index = torch.cat(bboxes_with_batch_index)\n context_indices = torch.cat(context_indices)\n \n labels = torch.cat(labels)\n \n return images, bboxes_with_batch_index, context_indices, labels", "def combine_heads_2d(inputs):\n transposed = tf.transpose(inputs, [0, 2, 3, 1, 4])\n Nh, channels = shape_list(transposed)[-2:]\n ret_shape = shape_list(transposed)[:-2] + [Nh * channels]\n return tf.reshape(transposed, ret_shape)", "def _reshape_batch(inputs, size, batch_size):\n batch_inputs = []\n for length_id in range(size):\n batch_inputs.append(np.array([inputs[batch_id][length_id]\n for batch_id in range(batch_size)], dtype=np.int32))\n return batch_inputs", "def _reshape_batch(inputs, size, batch_size):\n batch_inputs = []\n for length_id in range(size):\n batch_inputs.append(np.array([inputs[batch_id][length_id]\n for batch_id in range(batch_size)], dtype=np.int32))\n return batch_inputs", "def next_batch(self, batch_size):\n # Get batch\n assert(batch_size == 1)\n em, mask_list, seed_list = self.next_example(self.K)\n\n # Reshape for batch size 1\n em_batch = np.expand_dims(em, 0)\n mask_list = [np.expand_dims(m,0) for m in mask_list]\n \n return em_batch, mask_list", "def _flatten_and_concat(x, 
batch_shape, dtype):\n # For convenience.\n if x is None:\n return x\n\n def _reshape_part(part):\n part = tf.cast(part, dtype)\n new_shape = ps.concat(\n [batch_shape, [-1]],\n axis=-1,\n )\n return tf.reshape(part, ps.cast(new_shape, tf.int32))\n\n x = tf.nest.map_structure(_reshape_part, x)\n return tf.concat(tf.nest.flatten(x), axis=-1)", "def get_batch(self, src, geometries):\n\n batch = []\n for bounds in geometries.bounds.itertuples():\n bot, left = src.index(bounds[1], bounds[2])\n top, right = src.index(bounds[3], bounds[4])\n window = rasterio.windows.Window(left, top, right-left, bot-top)\n batch.append(src.read(indexes=self.indexes, window=window))\n if self.interleave == 'pixel' and len(batch[-1].shape) == 3:\n batch[-1] = np.moveaxis(batch[-1], 0, -1)\n for func,args,kwargs in self.preprocess.values():\n batch[-1] = func(batch[-1], *args, **kwargs)\n\n return np.stack(batch)", "def batchify(data, batch_size):\n n_batch = data.shape[0] // batch_size\n data = data[:n_batch * batch_size]\n data = data.reshape((batch_size, n_batch)).T\n return data", "def get_4d(slice, copylayers=[], transparancy=0):\n assert slice.ndim < 3\n img = np.zeros(slice.shape)\n img = img[:, :, np.newaxis]\n img = np.repeat(img, 4, 2)\n transparancy = 255 - (255 * transparancy)\n img[:, :, -1] = transparancy\n for layer in copylayers:\n img[:, :, layer] = slice\n return(img)", "def basic_collate(batch):\n\n minibatch, targets = zip(*[(a, b) for (a,b) in batch])\n minibatch = stack(minibatch, dim=0)\n return minibatch, targets", "def __getitem__(self, batch_index):\n batch_images = np.zeros(shape=(self.batch_size, *MODEL_INPUT_SIZE, MODEL_INPUT_CHANNELS), dtype=np.float32)\n # For ages use -1 instead of zeros, because for black images age should be 0 months\n batch_ages = np.full(shape=(self.batch_size, 1), fill_value=-1, dtype=np.float32)\n batch_males = np.zeros(shape=(self.batch_size, 1), dtype=np.uint8)\n\n # Generate image indexes of the batch\n batch_image_indexes = self.image_indexes[batch_index * self.batch_size:(batch_index + 1) * self.batch_size]\n\n for item_number, batch_image_index in enumerate(batch_image_indexes):\n image_id = self.image_ids[batch_image_index][0]\n age = self.ages[batch_image_index]\n male = self.males[batch_image_index]\n\n image_path = self.images_path / f'{image_id}.png'\n image = skimage.io.imread(str(image_path))\n image = normalized_image(image)\n\n if self.is_train:\n augmented_image = augmentate_image(image)\n else:\n augmented_image = image\n\n augmented_image = augmented_image * 255\n augmented_image = np.stack((augmented_image,) * MODEL_INPUT_CHANNELS, axis=-1)\n batch_images[item_number, ...] = augmented_image\n\n batch_ages[item_number, ...] = age\n batch_males[item_number, ...] 
= male\n\n batch_images = preprocess_input(batch_images)\n return [batch_images, batch_males], batch_ages", "def process_state_batch(self, batch):\n # batch = np.squeeze(batch, axis=1)\n batch = np.array([np.concatenate(obs, axis=-1) for obs in batch])\n return batch", "def _collate(cls, inbatch, num_devices=None):\n item0 = inbatch[0]\n bsize = len(inbatch)\n if num_devices is None:\n num_devices = 1\n\n samples_per_device = int(np.ceil(bsize / num_devices))\n\n # assert bsize % samples_per_device == 0\n stacked = []\n if item0.cpu_only:\n # chunking logic\n stacked = []\n for i in range(0, bsize, samples_per_device):\n stacked.append(\n [sample.data for sample in inbatch[i:i + samples_per_device]])\n\n elif item0.stack:\n for i in range(0, bsize, samples_per_device):\n item = inbatch[i]\n pad_dims_ = item.pad_dims\n assert isinstance(item.data, torch.Tensor)\n\n if pad_dims_ is not None:\n # Note: can probably reimplement this using padded collate\n # logic\n ndim = item.dim()\n assert ndim > pad_dims_\n max_shape = [0 for _ in range(pad_dims_)]\n for dim in range(1, pad_dims_ + 1):\n max_shape[dim - 1] = item.shape[-dim]\n for sample in inbatch[i:i + samples_per_device]:\n for dim in range(0, ndim - pad_dims_):\n assert item.shape[dim] == sample.shape[dim]\n for dim in range(1, pad_dims_ + 1):\n max_shape[dim - 1] = max(max_shape[dim - 1], sample.shape[-dim])\n padded_samples = []\n for sample in inbatch[i:i + samples_per_device]:\n pad = [0 for _ in range(pad_dims_ * 2)]\n for dim in range(1, pad_dims_ + 1):\n pad[2 * dim - 1] = max_shape[dim - 1] - sample.shape[-dim]\n padded_samples.append(\n F.pad(sample.data, pad, value=sample.padding_value))\n stacked.append(default_collate(padded_samples))\n\n elif pad_dims_ is None:\n stacked.append(\n default_collate([\n sample.data\n for sample in inbatch[i:i + samples_per_device]\n ]))\n else:\n raise ValueError(\n 'pad_dims should be either None or integers (1-3)')\n\n else:\n for i in range(0, bsize, samples_per_device):\n stacked.append(\n [sample.data for sample in inbatch[i:i + samples_per_device]])\n result = BatchContainer(stacked, **item0.meta)\n return result", "def _create_chunks(opts, inputs, idx1, idx2):\n # idx2 = 75\n # idx1 = 71\n num_batch = idx2 - idx1\n # img1 = torch.zeros(num_batch, 1, 10, 224, 224)\n # img2 = torch.zeros(num_batch, 1, 10, 224, 224)\n # labels = torch.zeros(num_batch)\n\n feat1_list = []\n label_list = []\n for i in range(num_batch):\n curr_idx = i + idx1\n frames = range(curr_idx - 5, curr_idx + 5)\n temp1 = _load_chunk(opts, inputs, frames)\n feat1_list.append(temp1)\n\n temp_label = inputs[1][curr_idx, :].nonzero()\n if len(temp_label.size()) == 0:\n temp_label = 6\n else:\n if temp_label.size()[0] != 0:\n temp_label = temp_label[0][0]\n label_list.append(temp_label)\n\n feat1 = torch.cat(feat1_list, dim=0)\n labels = torch.LongTensor(label_list)\n return feat1, labels", "def _extract(self, a, t, x_shape):\n batch_size = x_shape[0]\n out = tf.gather(a, t)\n return tf.reshape(out, [batch_size, 1, 1, 1])", "def process_batch(batch):\n args = get_args()\n\n tokens = batch['text'].long().cuda().contiguous()\n types = batch['types'].long().cuda().contiguous()\n labels = batch['label'].long().cuda().contiguous()\n attention_mask = batch['padding_mask'].float().cuda().contiguous()\n if args.fp16:\n attention_mask = attention_mask.half()\n\n return tokens, types, labels, attention_mask", "def __init__(self, ndim, nchannels=2, channels=None):\n self.channels, self.indices_split, self.indices_merge = 
split_merge_indices(ndim, nchannels=nchannels,\n channels=channels)", "def normalise_stack(stack):\n\n for index in range(stack.shape[2]):\n stack[:,:,index] = normalise_slice(stack[:,:,index])\n\n return stack", "def my_detection_collate(batch):\n targets_1 = []\n imgs = []\n for sample in batch:\n # each sample is the result of one query on the dataset object\n imgs.append(sample[0])\n targets_1.append(torch.FloatTensor(sample[1]))\n return torch.stack(imgs, 0), targets_1", "def _msdd_infer_collate_fn(self, batch):\n\n packed_batch = list(zip(*batch))\n feats, feats_len, targets, ms_avg_embs = packed_batch\n feats_list, flen_list, targets_list, ms_avg_embs_list = [], [], [], []\n max_audio_len = max(feats_len)\n max_target_len = max([x.shape[0] for x in targets])\n\n for feature, feat_len, target, ivector in batch:\n flen_list.append(feat_len)\n ms_avg_embs_list.append(ivector)\n if feat_len < max_audio_len:\n pad_a = (0, 0, 0, 0, 0, max_audio_len - feat_len)\n pad_t = (0, 0, 0, max_target_len - target.shape[0])\n padded_feature = torch.nn.functional.pad(feature, pad_a)\n padded_target = torch.nn.functional.pad(target, pad_t)\n feats_list.append(padded_feature)\n targets_list.append(padded_target)\n else:\n targets_list.append(target.clone().detach())\n feats_list.append(feature.clone().detach())\n\n feats = torch.stack(feats_list)\n feats_len = torch.tensor(flen_list)\n targets = torch.stack(targets_list)\n ms_avg_embs = torch.stack(ms_avg_embs_list)\n return feats, feats_len, targets, ms_avg_embs", "def batch_flatten(x):\n shape = x.get_shape().as_list()[1:]\n if None not in shape:\n return tf.reshape(x, [-1, int(np.prod(shape))])\n return tf.reshape(x, tf.stack([tf.shape(x)[0], -1]))", "def batch_flatten(x):\n shape = x.get_shape().as_list()[1:]\n if None not in shape:\n return tf.reshape(x, [-1, int(np.prod(shape))])\n return tf.reshape(x, tf.stack([tf.shape(x)[0], -1]))", "def prepare(dataset):\n dataset = dataset.reshape(dataset.shape[0], 1, 28, 28)\n dataset = dataset.astype('float32')\n dataset /= 255\n return dataset", "def batch_split(self) -> np.array:\n pass", "def dimension_preprocess(self, data, padding=True):\r\n\r\n assert len(data.shape) == 2, \"Data dimension expected to be ( xline, samp_point)\"\r\n if padding:\r\n if data.shape[0] < self.rows:\r\n padding = np.ones((self.rows - data.shape[0], data.shape[1]))\r\n data = np.concatenate((data, padding), axis=0)\r\n if data.shape[1] < self.cols:\r\n padding = np.ones((data.shape[0], self.cols - data.shape[1]))\r\n data = np.concatenate((data, padding), axis=1)\r\n x_chunks, y_chunks = self.get_chunks(data)\r\n images = []\r\n for x in x_chunks:\r\n for y in y_chunks:\r\n images.append(\r\n data[x[0]:x[1], y[0]:y[1]]\r\n )\r\n images = np.array(images)\r\n\r\n return images", "def __getitem__(self, index):\n # Generate indexes of the batch\n data_index_min = int(index * self.batch_size)\n data_index_max = int(min((index + 1) * self.batch_size, len(self.image_filenames)))\n\n indexes = self.image_filenames[data_index_min:data_index_max]\n\n this_batch_size = len(indexes) # The last batch can be smaller than the others\n\n # Defining dataset\n X = np.empty((this_batch_size, self.image_size, self.image_size, 3), dtype=np.float32)\n y = np.empty((this_batch_size, self.image_size, self.image_size, self.nb_y_features), dtype=np.uint8)\n\n for i, sample_index in enumerate(indexes):\n\n X_sample, y_sample = self.read_image_mask(self.image_filenames[index * self.batch_size + i],\n self.mask_names[index * self.batch_size + i])\n\n # 
if augmentation is defined, we assume its a train set\n if self.augmentation is not None:\n\n # Augmentation code\n augmented = self.augmentation(self.image_size)(image=X_sample, mask=y_sample)\n image_augm = augmented['image']\n mask_augm = augmented['mask'].reshape(self.image_size, self.image_size, self.nb_y_features)\n X[i, ...] = np.clip(image_augm, a_min=0, a_max=1)\n y[i, ...] = mask_augm\n\n # if augmentation isnt defined, we assume its a test set. \n # Because test images can have different sizes we resize it to be divisable by 32\n elif self.augmentation is None and self.batch_size == 1:\n X_sample, y_sample = self.read_image_mask(self.image_filenames[index * 1 + i],\n self.mask_names[index * 1 + i])\n augmented = Resize(height=(X_sample.shape[0] // 32) * 32, width=(X_sample.shape[1] // 32) * 32)(\n image=X_sample, mask=y_sample)\n X_sample, y_sample = augmented['image'], augmented['mask']\n\n return X_sample.reshape(1, X_sample.shape[0], X_sample.shape[1], 3).astype(\n np.float32), y_sample.reshape(1, X_sample.shape[0], X_sample.shape[1], self.nb_y_features).astype(\n np.uint8)\n\n return X, y", "def __getitem__(self, idx):\n\n def load_image_mask(idx):\n img = cv2.imread(os.path.join(self.img_path, self.img_files[idx]))\n mask = cv2.imread(os.path.join(self.mask_path, self.img_files[idx]), cv2.IMREAD_GRAYSCALE)\n return img, mask\n\n # retrieve current image index and current augmentation index\n curr_img_idx, curr_augm_idx = self.__get_img_augm_idx__(idx)\n\n batch_img = []\n batch_mask = []\n\n img, mask = load_image_mask(curr_img_idx)\n batch_gen_iter = 0\n\n # generate AT MOST self.batch_size images\n\n while batch_gen_iter < self.batch_size:\n\n if curr_augm_idx < self.gen_count:\n\n # there are still augmentations to generate for current image\n # let's generate them\n\n if mask is None:\n print(f\"== WARNING: Image {self.img_files[curr_img_idx]}\" +\n f\"does not have corresponding mask in \\\"{self.mask_path}\\\"; skipping ==\")\n\n else:\n crop_res = self.crop_compose(image=img, mask=mask)\n augm_img, augm_mask = crop_res[\"image\"], crop_res[\"mask\"]\n\n if curr_augm_idx != 0 and self.augm:\n augm_res = self.augm_compose(image=augm_img, mask=augm_mask)\n augm_img, augm_mask = augm_res[\"image\"], augm_res[\"mask\"]\n\n # threshold and transform mask for NN model\n\n _, augm_mask = cv2.threshold(augm_mask, 127, 255, cv2.THRESH_BINARY)\n augm_mask = np.stack([(augm_mask == 255)], axis=-1).astype('float')\n\n # append augmented image and mask to batches\n\n batch_img.append(augm_img)\n batch_mask.append(augm_mask)\n\n curr_augm_idx += 1\n batch_gen_iter += 1\n\n else:\n\n # all augmentations for current images have been generated\n # move to next image\n\n curr_img_idx += 1\n curr_augm_idx = 0\n\n if curr_img_idx < len(self.img_files):\n img, mask = load_image_mask(curr_img_idx)\n else:\n break\n\n return np.array(batch_img), np.array(batch_mask)", "def _extract_features(self, all_batches, patch_size, train=True):\n # manually derive basic intensities features\n # takes 20 sec / 1048 images batch on my laptop in 4 cores //\n p = patch_size\n r = 512 // p\n labels = np.empty(0)\n feats = np.empty(0)\n for counter, tmp in enumerate(all_batches):\n # if counter == 2:\n # break\n if train:\n batch_img, batch_label = tmp\n else:\n batch_img = tmp\n batch_label = np.empty(0)\n # just for testing just use 20 batch as training set\n print('processing batch {}'.format(counter))\n t1 = time.time()\n batch_feats = np.asarray(\n parmap.map(\n self._get_features_from_batch_images,\n 
batch_img,\n r,\n p,\n pm_pbar=True))\n print(time.time() - t1)\n labels = np.concatenate(\n (labels, batch_label)) if labels.size else batch_label\n feats = np.concatenate(\n (feats, batch_feats)) if feats.size else batch_feats\n if train:\n return feats, labels\n else:\n return feats", "def shatter_batch(self, batch):\n return [tuple([elem[i] for elem in batch])\n for i in range(batch.size)]", "def stack_tifs(tifseries, xsize, ysize):\n holster = np.empty((len(tifseries), ysize, xsize))\n for i, layer in enumerate(tifseries):\n lay = Image.open(layer)\n lis = list(lay.getdata())\n holster[i] = np.array(lis, np.uint16).reshape(512, 512)\n\n return holster", "def test_reshape_conv_slice_conv():\n class Net(gluon.HybridBlock):\n def __init__(self, **kwargs):\n super(Net, self).__init__(**kwargs)\n self.conv0 = nn.Conv2D(16, (3, 3))\n self.conv1 = nn.Conv2D(32, (3, 3))\n\n def hybrid_forward(self, F, x):\n x_reshape = x.reshape((0, 0, 64, 16))\n y = self.conv0(x_reshape)\n \"shape of y is (4, 16, 62, 14)\"\n y_slice = y.slice(begin=(0, 0, 0, 0), end=(2, 16, 14, 14))\n out = self.conv1(y_slice)\n return out\n x = mx.nd.random.uniform(shape=(4, 3, 32, 32))\n net = Net()\n check_layer_forward_withinput(net, x)", "def expand_images(X):\n\n X_ex = np.empty((X.shape[0] * X.shape[1], X.shape[2])) * np.nan\n\n for n in range(0, X.shape[2]):\n X_ex[:,n] = X[:,:,n].flatten()\n\n return X_ex", "def split_heads(x, batch_size, num_heads, depth):\n x = tf.reshape(x, (batch_size, -1, num_heads, depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])", "def collate_fn(batch):\n all_input_ids, all_attention_mask, all_token_type_ids, all_lens, all_labels = map(torch.stack, zip(*batch))\n max_len = max(all_lens).item()\n all_input_ids = all_input_ids[:, :max_len]\n all_attention_mask = all_attention_mask[:, :max_len]\n all_token_type_ids = all_token_type_ids[:, :max_len]\n return all_input_ids, all_attention_mask, all_token_type_ids, all_labels", "def batch_stack(S):\n\tC, M = S.shape[3], S.shape[-1]\n\treturn S.reshape(-1,C,M,M)", "def data_augmentation(image_data, mask_data, rotate=False, vertical_flip=False, horizontal_flip=False):\n aug_images = []\n aug_masks = []\n\n for _ in range(len(image_data)):\n if rotate:\n rotation = A.RandomRotate90(p=1)\n rotated_data = rotation(image=image_data[_], mask=mask_data[_])\n rotated_image = rotated_data['image']\n rotated_mask = rotated_data['mask']\n aug_images.append(rotated_image)\n aug_masks.append(rotated_mask)\n\n if vertical_flip:\n flip_v = A.VerticalFlip(p=1)\n vertical_data = flip_v(image=image_data[_], mask=mask_data[_])\n vertical_image = vertical_data['image']\n vertical_mask = vertical_data['mask']\n aug_images.append(vertical_image)\n aug_masks.append(vertical_mask)\n\n if horizontal_flip:\n flip_h = A.HorizontalFlip(p=1)\n horizontal_data = flip_h(image=image_data[_], mask=mask_data[_])\n horizontal_image = horizontal_data['image']\n horizontal_mask = horizontal_data['mask']\n aug_images.append(horizontal_image)\n aug_masks.append(horizontal_mask)\n\n nd_images = make_ndarray(aug_images)\n nd_masks = make_ndarray(aug_masks)\n #nd_images = np.zeros((len(aug_images), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.float32)\n #nd_masks = np.zeros((len(aug_masks), IMG_HEIGHT, IMG_WIDTH), dtype=np.float32)\n\n #for _ in range(len(aug_images)): # Load into ndarray\n # nd_images[_] = aug_images[_]\n # nd_masks[_] = aug_masks[_] # load mask without channel variable\n\n return nd_images, nd_masks", "def load_dataset(image_home, mask_home, patient_list, \n size = 
512, \n downsample = 0.5, \n overlap = 1.5, \n verbose=False):\n\n image_list = np.concatenate([sorted(glob.glob(f'{image_home}/{p}/*')) for p in patient_list])\n mask_list = np.concatenate([sorted(glob.glob(f'{mask_home}/{p}/*')) for p in patient_list])\n\n if verbose:\n for i, (im, m) in enumerate(zip(image_list, mask_list)):\n print(i, im, m)\n\n x = []\n y = [] \n\n for im, m in zip(image_list, mask_list):\n image = cv2.imread(im)[:,:,::-1]\n mask = cv2.imread(m, -1)\n mask = squash_labels(mask)\n \n image = cv2.resize(image, dsize=(0,0), fx=downsample, fy=downsample)\n mask = cv2.resize(mask, dsize=(0,0), fx=downsample, fy=downsample,\n interpolation=cv2.INTER_NEAREST)\n\n # assert (image.shape == mask.shape).all()\n split_x , split_y = split(image, mask, int(size * downsample), overlap)\n\n x.append(split_x)\n y.append(split_y)\n\n\n x = np.concatenate(x, axis=0)\n y = np.concatenate(y, axis=0)\n y = np.eye(N=y.shape[0], M=4)[y]\n\n shuffle = np.arange(x.shape[0]).astype(np.int)\n np.random.shuffle(shuffle)\n x = x[shuffle, :]\n y = y[shuffle, :]\n\n x = (x / 255.).astype(np.float32)\n\n print('split_datasets returning x:', x.shape, x.dtype, x.min(), x.max())\n print('split_datasets returning y:', y.shape, y.dtype)\n return x, y", "def collate_fn(self, batch):\n images, boxes, categories = [], [], []\n\n for b in batch:\n images.append(b['img'])\n boxes.append(b['box'])\n categories.append(b['category'])\n\n images = torch.stack(images, dim=0)\n\n # tensor (N, 3, 300, 300), 3 lists of N tensors each\n return {\n 'imgs': images,\n 'boxes': boxes,\n 'categories': categories\n }" ]
[ "0.59909207", "0.58123934", "0.5716505", "0.5716326", "0.5704696", "0.5700815", "0.55700964", "0.5564052", "0.55450964", "0.55180305", "0.551707", "0.54962254", "0.5416754", "0.5411667", "0.5395286", "0.5373396", "0.53642625", "0.5344805", "0.5337699", "0.5336697", "0.53321487", "0.53207237", "0.5320249", "0.5304845", "0.5279684", "0.5278637", "0.5278417", "0.5277993", "0.52759033", "0.5266497", "0.52631533", "0.52631533", "0.5260358", "0.52551067", "0.52398086", "0.5239735", "0.52209365", "0.5217683", "0.52139956", "0.5208937", "0.52051085", "0.5202056", "0.5196048", "0.5193553", "0.5191278", "0.5182818", "0.51523316", "0.5150871", "0.51483613", "0.5147269", "0.5145486", "0.51452184", "0.51403934", "0.5129095", "0.51274174", "0.5125545", "0.51209277", "0.51164186", "0.51140004", "0.51011795", "0.50871086", "0.5078131", "0.5068424", "0.50648326", "0.50646585", "0.50646585", "0.50642115", "0.505976", "0.50542337", "0.5048132", "0.50408417", "0.50340056", "0.50339884", "0.5029871", "0.5026819", "0.5024301", "0.501998", "0.5011631", "0.50083655", "0.4998001", "0.49979854", "0.49975622", "0.4997153", "0.4997153", "0.49900806", "0.4988196", "0.49798745", "0.49785602", "0.49777052", "0.49753985", "0.4974396", "0.4968844", "0.49666342", "0.49625766", "0.4956245", "0.49543568", "0.49446675", "0.4943424", "0.49376357", "0.49309808" ]
0.7611791
0
Animals that can speak are correctly identified
def test_animals_can_speak(self):
    self.assertEqual(self.lion, 'roar')
    self.assertEqual(self.cat, 'meow')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def animals_by_species(self):\n print self.animal()", "def animal_eats(self):\n self.update_fodder()\n self.herbivore_eats()\n self.carnivore_eats()", "def substantiate():", "def speak(self):\n # Speaks randomly to another agent on the same cell\n anticipated_meaning = None\n cellmates = self.model.grid.get_cell_list_contents([self.pos])\n\n # If other agents on the same cell\n if len(cellmates) > 1:\n hearer = self.random.choice(cellmates)\n\n while (hearer == self): # agents should not talk to themselves\n hearer = self.random.choice(cellmates)\n\n meaning = self.random.choice(self.model.schedule.agents).unique_id\n\n # If the speaker is not acquainted with the meaning\n if meaning not in self.meanings:\n print(\"New meaning added to speaker\")\n self.meanings.append(meaning)\n return Conversation(word=None, meaning=None, success=0.0)\n\n # If the hearer is not acquainted with the meaning\n if meaning not in hearer.meanings:\n print(\"New meaning added to hearer\")\n hearer.meanings.append(meaning)\n return Conversation(word=None, meaning=None, success=0.0)\n\n # 50% chance of having an anticipated meaning default\n if self.random.random() <= self.model.antecipated_prob:\n print(\" \" + str(self.unique_id) +\n \" points at \" + str(meaning))\n anticipated_meaning = meaning\n\n # If the speaker has a word for the meaning\n if meaning in self.meaning2word:\n word = self.meaning2word[meaning]\n\n # If the hearer has a word for the meaning\n if word in hearer.word2meaning:\n # If the hearer has no anticipated meaning\n if anticipated_meaning == None:\n return Conversation(word=word, meaning=meaning, success=1.0)\n # If anticipated meaning different from hearer meaning\n if (anticipated_meaning != None\n and anticipated_meaning != hearer.word2meaning[word]):\n hearer.delete_link(word)\n hearer.create_link(word, anticipated_meaning)\n return None\n # If anticipated meaning same as hearer meaning\n if (anticipated_meaning != None\n and anticipated_meaning == hearer.word2meaning[word]):\n return Conversation(word=word, meaning=meaning, success=1.0)\n\n # If the hearer has no word for the meaning\n else:\n # If anticipated meaning same as speaker meaning\n if (anticipated_meaning != None\n and word not in hearer.word2meaning\n and anticipated_meaning not in hearer.meaning2word):\n hearer.create_link(word, anticipated_meaning)\n return Conversation(word=word, meaning=meaning, success=0.0)\n\n # If the speaker has no word for the meaning\n if meaning not in self.meaning2word:\n return Conversation(word=None, meaning=meaning, success=0.0)", "def add_animal(self, animal):\n try:\n if animal.saltwater:\n super().add_animal(animal)\n except AttributeError:\n raise AttributeError(\"Animal Is Incompatible With Biome\")", "def on_object(self, image, objects):\n for obj in objects:\n if self.is_object_recognition_appropriate(obj.name):\n self.say(\"I see a {}\".format(obj.name))", "def _sense_and_act(self):\n pass", "def recognize():\n return 0", "def animals(self):\n return self.herbivores + self.carnivores", "def all_animals_eat(self):\n for cell in itertools.chain.from_iterable(self.map):\n if type(cell).__name__ in self.allowed_cells:\n cell.gen_fodder()\n cell.eat_herbivore()\n cell.eat_carnivore()", "def animals_gives_birth(self):\n for species, animals in self.new_fauna_list.items():\n for i in range(math.floor(len(self.new_fauna_list[species])/2)):\n animal = animals[i]\n if animal.probability_of_birth(len(animals)):\n offspring_species = animal.__class__\n offspring = offspring_species()\n 
animal.update_weight_after_birth(offspring)\n if animal.gives_birth:\n self.fauna_list[species].append(offspring)\n animal.gives_birth = False", "def __init__(self):\n IContainsAnimals.__init__(self, 15)\n IContainsPlants.__init__(self, 3)\n Identifiable.__init__(self)\n Biome.__init__(self, \"Coastline\")", "def known_organisms():\n return [\"rat\"]", "def _get_animal_from_message(self, message):\n animal = None\n\n # Try to find an animal from our inventory in the message\n find_animal_regex = r'({animals})'.format(animals='|'.join(self.animals))\n ret = re.findall(find_animal_regex, message)\n\n # re.findall return is a list of matching strings in the message\n # Is an empty list if no match found\n if ret:\n animal = random.choice(ret)\n\n return animal", "def is_actor():\n return False", "def animal_dies(self):\n for species, animals in self.fauna_list.items():\n for animal in animals:\n if animal.probability_of_death:\n self.remove_animal(animal)", "def test_check_ambigous(self):\r\n\r\n flow0 = Flowgram(\"\")\r\n flow1 = Flowgram(\r\n \"0 1.2 2.1 3.4 0.02 0.01 1.02 0.08 0.5 1.0 4.1 0.0 0.0 1.23 0.0 3.1\")\r\n flow2 = Flowgram(\r\n \"0 1.2 2.1 3.4 0.02 0.01 1.02 0.08 0.5 1.0 4.1 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.23 0.0 3.1\")\r\n flow3 = Flowgram(\r\n \"0 1.2 2.1 3.4 0.02 0.0 0.0 0.01 1.02 0.08 0.5 1.0 4.1 0.0 0.0 0.0 0.0 1.23 0.0 3.1\")\r\n\r\n self.assertEqual(check_ambigous(flow0, 4), False)\r\n self.assertEqual(check_ambigous(flow1, 4), False)\r\n self.assertEqual(check_ambigous(flow2, 4), True)\r\n self.assertEqual(check_ambigous(flow2, 7), True)\r\n self.assertEqual(check_ambigous(flow2, 8), False)\r\n self.assertEqual(check_ambigous(flow3, 3), True)\r\n self.assertEqual(check_ambigous(flow3, 4), False)", "def test_interaction_accepts_name():\n demag = ThinFilmDemag()\n assert hasattr(demag, 'name')", "async def fox(self, interaction: Interaction):\n await post_random_animal_command(interaction)", "def add(self, animal):\n x, y = animal.coords\n if animal.species == 'Elephant' and self.__nb_elephants < 5 and (x == 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:\n self[x][y] = animal\n self.__nb_elephants += 1\n self.playerTurn = \"Rhinoceros\"\n\n elif animal.species == 'Rhinoceros' and self.__nb_rhinoceros < 5 and (x == 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:\n self[x][y] = animal\n self.__nb_rhinoceros += 1\n self.playerTurn = \"Elephant\"\n else:\n return False", "def think(s):", "def ate_poison(self):\r\n for food in self.get_dna():\r\n i = food.split(\",\")\r\n if \"poison\" in food:\r\n print(self.get_species(), \"from generation\", self.get_generation(),\r\n \"died from poison after eating\", i[0])\r\n return (True)", "def sense_and_act(self):\n pass", "def classify(self, audio_sample, should_print=True):\n features_left, features_right = self.extract_features(audio_sample)\n classification_counts = [0 for x in range(len(self.speakers))]\n\n for i in range(len(features_left)):\n feature = np.reshape(features_left[i, :], (1, -1))\n\n left_pred = int(self.left_model.predict(feature)[0])\n classification_counts[left_pred] += 1\n\n if self.both_channels:\n right_pred = int(self.right_model.predict(feature)[0])\n classification_counts[right_pred] += 1\n\n probabilities = np.array(classification_counts) / sum(classification_counts)\n pred = np.argmax(probabilities)\n\n if should_print:\n print(probabilities)\n\n if probabilities[pred] > self.certainty:\n print(\"Identified %s\" % self.speakers[pred])\n return self.speakers[pred]\n else:\n 
print(\"Unidentified Speaker\")\n return -1", "def handDecision(handIn):", "def random_data(self) -> (str, str):\n random_animal = random.choice(self.animals_list)\n synsets = wn.synsets(str(random_animal))\n definition = \"\"\n while True:\n if len(synsets) != 0:\n for synset in synsets:\n if synset.lexname() == 'noun.animal':\n definition = synset.definition()\n break\n else:\n random_animal = random.choice(self.animals_list)\n synsets = wn.synsets(str(random_animal))\n return random_animal, definition", "def an(text):\n text = force_unicode(text)\n if not CONSONANT_SOUND.match(text) and VOWEL_SOUND.match(text):\n return 'an'\n return 'a'", "def immobilized(self, owner):\n messages = []\n immobilized = False\n \n if self.checkOver(owner, messages):\n immobilized = False\n \n elif self.confused(random.randint(0, 1)):\n self.doDamage(owner, messages)\n immobilized = True\n \n return immobilized, messages", "def test_article_can_speak(self):\n lion = Animal.objects.get(title=\"Akun KU\")\n self.assertEqual(lion.title, 'The lion says \"roar\"')", "def test_basic_inter_format(self):\n buff_interactions = isambard.buff.find_inter_ampal(\n self.topo, self.ff.distance_cutoff)\n for _ in range(100):\n a, b = random.choice(buff_interactions)\n self.assertTrue(type(a) is isambard.ampal.Atom)\n self.assertTrue(type(b) is isambard.ampal.Atom)\n self.assertTrue(a != b)", "def test_actor_matches_activity(self):", "def test_selecting_only_audio_episodes(\n only_audio_episodes: List[LepEpisode],\n) -> None:\n assert len(only_audio_episodes) == 14 # Without duplicates", "def get_animals(self):\n for animal in self.animals:\n print(animal)", "def think(self):\n pass", "def mate_all_animals(self):\n for cell in itertools.chain.from_iterable(self.map):\n if type(cell).__name__ in self.allowed_cells:\n cell.mating()", "def sniff( self, filename ):\n handle = open(filename)\n line = handle.readline()\n handle.close()\n first = line.split()\n\n # 0 -> ID animal\n #read fixed effect\n i=1\n while (first[i].isalnum() and i<len(first)):\n i=i+1\n\n if ( i >= len(first) ):\n return False\n\n #read cov\n while ((i+1)<len(first) and not first[i+1].isalnum() ):\n i=i+1\n\n if ( i+1 >= len(first) ):\n return False\n\n #read trait\n while ((i+1)<len(first) and (i+2)<len(first) and not first[i].isalnum() and first[i+1].isalnum() and first[i+2].isalnum()):\n i=i+3\n\n if ( i != len(first) ):\n return False\n\n return True", "def can_read(self):\n\n return super(RamanSpeReader, self).can_read(extension='spe')", "def init_animals(self, prey_cnt, predator_cnt):\n cnt = 0\n # While loop continues until prey_cn unoccupied positions are found\n while cnt < prey_cnt:\n x = random.randint(0, self.grid_size-1)\n y = random.randint(0, self.grid_size-1)\n if not self.animal(x, y):\n new_prey = Prey(island=self, x=x, y=y)\n cnt += 1\n self.register(new_prey)\n cnt = 0\n while cnt < predator_cnt:\n x = random.randint(0, self.grid_size-1)\n y = random.randint(0, self.grid_size-1)\n if not self.animal(x, y):\n new_pred = Predator(island=self, x=x, y=y)\n cnt += 1\n self.register(new_pred)", "def find_new_people(self):\n #greets people, only greets once while they're in the camera's view and are center of attention\n\n\n if (self.person is not None) and (self.person.acknowledged == False):\n self.person.acknowledged = True\n print \"I see you!\"\n self.idle_pub.publish(\"idle:stop\")\n time.sleep(2)\n\n greeting = [\"R_nudge\",\"R_look\"]\n for msg in greeting:\n self.behavior_pub.publish(msg)\n self.check_completion()\n\n\n 
self.detection_pub.publish('found')\n\n elif self.person is None:\n print \"I don't see you\"\n self.detection_pub.publish('nothing')", "def analyse ( self ) :\n odin = self.get( self.RootInTES + 'DAQ/ODIN' )\n \n ## Check for PVs\n PVs = self.get( self.RootInTES + self.InputPrimaryVertices )\n if not PVs or PVs.size() == 0:\n self.setFilterPassed( False )\n return SUCCESS\n\n ## get recontructed B+ mesons\n Bs = self.select ( 'B' , eval( self._cut % self._selection ) )\n \n if not Bs or Bs.size() == 0:\n self.setFilterPassed( False )\n return SUCCESS \n\n ## Select random candidate\n r = self.random( odin )\n n = Bs.size()\n for i in xrange( n ):\n if r <= ( float( i ) / float( n ) ): break\n B = Bs[ i ]\n \n tisTos = self.tisTosSignal( B, \"Hlt1Track(AllL0|Muon)Decision\" )\n if tisTos.tos():\n ## This has to be a clone, otherwise it doesn't work...\n self.markParticle( B.clone() )\n self.setFilterPassed( True )\n else:\n self.setFilterPassed( False )\n\n return SUCCESS", "def is_summon(self):\n return False", "def test_instances(self):\n kb = logic.PropKB()\n kb.tell(logic.expr('ISA(mammal, animal)'))\n kb.tell(logic.expr('ISA(cat, mammal)'))\n kb.tell(logic.expr('INSTANCEOF(petunia, cat)'))\n self.assertAllBindingsEqual(\n kb.ask_all(logic.expr('ISA(petunia, ?x)')),\n [{'?x': 'petunia'}, {'?x': 'cat'}, {'?x': 'mammal'}, {'?x': 'animal'}])\n self.assertAllBindingsEqual(\n kb.ask_all(logic.expr('ISINSTANCE(petunia)')), [{}])\n self.assertAllBindingsEqual(\n kb.ask_all(logic.expr('ISINSTANCE(?x)')), [{'?x': 'petunia'}])\n\n self.assertAllBindingsEqual(\n kb.ask_all(logic.expr('INSTANCEOF(petunia, ?x)')),\n [{'?x': 'cat'}])\n self.assertAllBindingsEqual(\n kb.ask_all(logic.expr('ISINSTANCE(?x)')), [{'?x': 'petunia'}])\n self.assertAllBindingsEqual(kb.ask_all(logic.expr('INSTANCEOF(?x, ?y)')),\n [{'?x': 'petunia', '?y': 'cat'}])\n self.assertAllBindingsEqual(\n kb.ask_all(logic.expr('ISINSTANCE(petunia)')), [{}])", "def _(animal):\n print(\"Searching the garden's animals\")\n return animal in _animals", "def test_one_by_one_crossAdiMilan(self):\n human_detector_inst = HumanDetector(\n find_humans_from_video_file_name='videos/TempleVideos/one_by_one_cross_AdiMilan.mp4',\n use_pi_camera=False, open_display=True)\n self.assertEqual(human_detector_inst.perform_job(), None)\n human_centroid_dict = human_detector_inst.get_human_centroid_dict()\n self.assertEqual(len(human_centroid_dict), 2)\n self.assertEqual(human_centroid_dict[0].direction, Direction.EXIT)\n self.assertEqual(human_centroid_dict[1].direction, Direction.ENTER)\n self.assertEqual(SendReceiveMessages().get_face_detected_count_locally(), 0)\n human_detector_inst.clean_up()\n self.__cleanup()", "def probe(self):", "def test_identification_banana_vs_bowl_vs_food_can(self):\n # Getting the dataset\n bowl_ids = ['fa61e604661d4aa66658ecd96794a1cd',\n 'f74bba9a22e044dea3769fcd5f96f4',\n 'd2e1dc9ee02834c71621c7edb823fc53']\n banana_ids = ['f6e6117261dca163713c042b393cc65b',\n 'ba0d56295321002718ddbf38fa69c501',\n '7d78e217e0ba160fe2b248b8bb97d290']\n bowls = []\n for bowl_id in bowl_ids:\n bowls.append(SketchupModel.find_google_id(bowl_id))\n bananas = []\n for banana_id in banana_ids:\n bananas.append(SketchupModel.find_google_id(banana_id))\n # Training\n iden = Identifier()\n iden.add_models(bananas, 'banana')\n iden.add_models(bowls, 'bowl')\n iden.train()\n # Identification\n for i in range(20):\n example = Example.get_random(['banana', 'bowl'])\n pcd_file = example.pcd_file()\n print \"Identification of file {}\".format(example)\n 
cloud = PointCloud.load_pcd(pcd_file.name)\n iden.identify(cloud)", "def test_guess_nutrition_by_dish_name(self):\n pass", "def test_create(self):\n cat = self.animal_factory.create(\"cat\")\n dog = self.animal_factory.create(\"dog\")\n\n self.assertEquals(self.cat_class, cat.__class__)\n self.assertEquals(self.dog_class, dog.__class__)\n\n self.assertEquals(\"Meow\", cat.speak())\n self.assertEquals(\"Woof\", dog.speak())", "def object_detection(self):\r\n pass", "def test_audio_features(self):\n\n # 1ehPJRt49h6N0LoryqKZXq, 8737: How Far I'll Go (Alessia Cara Version) by Alessia Cara\n # 2fGFaTDbE8aS4f31fM0XE4, 5037: Pop 101 (feat. Anami Vice) by Marianas Trench\n targets = {8737: {'danceability': 0.317,\n 'energy': 0.562,\n 'key': 9,\n 'loudness': -9.609,\n 'mode': 1,\n 'speechiness': 0.395,\n 'acousticness': 0.124,\n 'instrumentalness': 0.000144,\n 'liveness': 0.0667,\n 'valence': 0.127,\n 'tempo': 181.100,\n 'duration_ms': 175507,\n 'time_signature': 4,\n },\n 5037: {'danceability': 0.756,\n 'energy': 0.658,\n 'key': 11,\n 'loudness': -6.128,\n 'mode': 0,\n 'speechiness': 0.202,\n 'acousticness': 0.0581,\n 'instrumentalness': 0,\n 'liveness': 0.0674,\n 'valence': 0.640,\n 'tempo': 120.018,\n 'duration_ms': 247829,\n 'time_signature': 4,\n },\n }\n\n results = {track.i_id: track for track in self.tracks if track.i_id in targets}\n\n for target, expecteds in targets.iteritems():\n result = results[target]\n for key, expected in expecteds.iteritems():\n self.assertEqual(result.__getattr__(key), expected)", "def test_connect(self):\n mediator = Mediator()\n mediator.connect(\"set_dog_sound\", self.dog.set_sound)\n self.assertEquals([self.dog.set_sound], mediator.signals[\"set_dog_sound\"])\n\n mediator.connect(\"set_cat_sound\", self.cat.set_sound)\n self.assertEquals([self.cat.set_sound], mediator.signals[\"set_cat_sound\"])", "def generate_animal_report(self):\n print('ANIMALS IN ' + self.name)\n for species, count in self.animals.items():\n print(f'{species}: {count}')", "def guess_cuewords():\n\n if t_word[:3] == 'nie':\n create_negation_frame()\n create_target_focus_scope()\n\n if t_word[:3] == 'nic':\n create_negation_frame()\n create_target_focus_scope()", "def eye_like(self):\n raise NotImplementedError", "def set_to_default_male_voice(sim_info: SimInfo) -> None:\n from sims4communitylib.utils.sims.common_age_species_utils import CommonAgeSpeciesUtils\n from sims4communitylib.utils.sims.common_species_utils import CommonSpeciesUtils\n from sims4communitylib.utils.sims.common_age_utils import CommonAgeUtils\n if CommonAgeSpeciesUtils.is_teen_adult_or_elder_human(sim_info):\n CommonSimVoiceUtils.set_voice_actor(sim_info, CommonVoiceActorType.ADULT_HUMAN_MASCULINE_1)\n elif CommonAgeSpeciesUtils.is_child_human(sim_info):\n CommonSimVoiceUtils.set_voice_actor(sim_info, CommonVoiceActorType.CHILD_HUMAN_AMBIGUOUS_1)\n elif CommonAgeSpeciesUtils.is_toddler_human(sim_info):\n CommonSimVoiceUtils.set_voice_actor(sim_info, CommonVoiceActorType.TODDLER_HUMAN_AMBIGUOUS_1)\n elif CommonSpeciesUtils.is_large_dog(sim_info) and CommonAgeUtils.is_teen_adult_or_elder(sim_info):\n CommonSimVoiceUtils.set_voice_actor(sim_info, CommonVoiceActorType.ADULT_DOG_AMBIGUOUS_1)\n elif CommonSpeciesUtils.is_large_dog(sim_info) and CommonAgeUtils.is_child(sim_info):\n CommonSimVoiceUtils.set_voice_actor(sim_info, CommonVoiceActorType.CHILD_DOG_AMBIGUOUS_1)\n elif CommonSpeciesUtils.is_small_dog(sim_info) and CommonAgeUtils.is_teen_adult_or_elder(sim_info):\n CommonSimVoiceUtils.set_voice_actor(sim_info, 
CommonVoiceActorType.ADULT_DOG_AMBIGUOUS_1)\n elif CommonSpeciesUtils.is_small_dog(sim_info) and CommonAgeUtils.is_child(sim_info):\n CommonSimVoiceUtils.set_voice_actor(sim_info, CommonVoiceActorType.CHILD_DOG_AMBIGUOUS_1)\n elif CommonSpeciesUtils.is_cat(sim_info) and CommonAgeUtils.is_teen_adult_or_elder(sim_info):\n CommonSimVoiceUtils.set_voice_actor(sim_info, CommonVoiceActorType.ADULT_CAT_AMBIGUOUS_1)\n elif CommonSpeciesUtils.is_cat(sim_info) and CommonAgeUtils.is_child(sim_info):\n CommonSimVoiceUtils.set_voice_actor(sim_info, CommonVoiceActorType.CHILD_CAT_AMBIGUOUS_1)\n elif CommonSpeciesUtils.is_fox(sim_info) and CommonAgeUtils.is_teen_adult_or_elder(sim_info):\n CommonSimVoiceUtils.set_voice_actor(sim_info, CommonVoiceActorType.ADULT_FOX_AMBIGUOUS_1)", "def aging_landscape(self):\n for herb in self.herb_pop:\n herb.aging_animal()\n herb.fitness_animal()\n\n for carn in self.carn_pop:\n carn.aging_animal()\n carn.fitness_animal()", "def test_no_abstract_syntax_match(self):\n\n def handle(event):\n ds = Dataset()\n ds.PatientName = \"Test^test\"\n return 0x0000, ds\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(DisplaySystem)\n scp = ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_GET, handle)]\n )\n\n ae.add_requested_context(DisplaySystem)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n msg = (\n r\"No presentation context for 'Verification SOP Class' has been \"\n r\"accepted by the peer \"\n r\"for the SCU role\"\n )\n with pytest.raises(ValueError, match=msg):\n assoc.send_n_get(None, Verification, None)\n\n assoc.release()\n assert assoc.is_released\n\n scp.shutdown()", "def is_artificial(self):\n\t\treturn 0", "def check_multi_animal_status(self) -> None:\n multi_animal_id_lst = []\n if not self.config.has_section(\"Multi animal IDs\"):\n for animal in range(self.animal_cnt):\n multi_animal_id_lst.append(\"Animal_\" + str(animal + 1))\n multi_animal_status = False\n\n else:\n multi_animal_id_str = self.read_config_entry(\n config=self.config,\n section=\"Multi animal IDs\",\n option=\"id_list\",\n data_type=\"str\",\n )\n multi_animal_id_lst = [x.lstrip() for x in multi_animal_id_str.split(\",\")]\n multi_animal_id_lst = [x for x in multi_animal_id_lst if x != \"None\"]\n if (self.animal_cnt > 1) and (len(multi_animal_id_lst) > 1):\n multi_animal_status = True\n else:\n for animal in range(self.animal_cnt):\n multi_animal_id_lst.append(\"Animal_{}\".format(str(animal + 1)))\n multi_animal_status = False\n\n self.multi_animal_status = multi_animal_status\n self.multi_animal_id_list = multi_animal_id_lst[: self.animal_cnt]", "def units_which_can_be_built(self):\n what_can_be_built = [Pikeman.kind]\n player = self.player\n if player.age in ('bronze age', 'iron age'):\n shields = BronzeShields\n swords = BronzeSwords\n if all(s.name in player.things_researched for s in (shields, swords)):\n what_can_be_built.append(Swordsman.kind)\n return what_can_be_built", "def visit_equipment(self, equipment):", "def find_animal(self, img_path: str, image: LoadedImage) -> Dict[AnimalType, List[yolo.BoundBox]]:\n found_animals = yolo.classify_image(image=image, img_path=img_path, yolov3=self.yolo_model,\n animals_to_find=list(self.models.keys()), save_bbox=False)\n return found_animals", "def test_no_abstract_syntax_match(self):\n\n def handle(event):\n ds = Dataset()\n ds.PatientName = \"Test^test\"\n return 0x0000, ds\n\n self.ae = ae = AE()\n 
ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(ModalityPerformedProcedureStep)\n scp = ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_SET, handle)]\n )\n\n ae.add_requested_context(ModalityPerformedProcedureStep)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n msg = (\n r\"No presentation context for 'Verification SOP Class' has been \"\n r\"accepted by the peer \"\n r\"for the SCU role\"\n )\n with pytest.raises(ValueError, match=msg):\n assoc.send_n_set(None, Verification, None)\n\n assoc.release()\n assert assoc.is_released\n\n scp.shutdown()", "def get_random_gestures(_):\n random_seed = random.getrandbits(32)\n gesture = 'monkey,%s' % str(random_seed)\n return [gesture]", "def test_signal(self):\n mediator = Mediator()\n mediator.connect(\"set_dog_sound\", self.dog.set_sound)\n mediator.connect(\"set_cat_sound\", self.cat.set_sound)\n mediator.signal(\"set_dog_sound\", \"woof\")\n mediator.signal(\"set_cat_sound\", \"meow\")\n\n self.assertEquals(\"woof\", self.dog.sound)\n self.assertEquals(\"meow\", self.cat.sound)", "def is_sound_inference(rule: InferenceRule) -> bool:\r\n # Task 4.3\r", "def isProteic(self):\n from MolKit.PDBresidueNames import AAnames\n\n self.AARes = [x for x in self.residues if x.type in AAnames]\n\n water = [x for x in self.residues if x.type in ['HOH', 'WAT']]\n\n if len(self.AARes) and len(self.AARes)+len(water) == len(self.residues):\n return True\n else:\n return False", "def _detect(self):\n return True", "def given_test_cases(self):\n self.assertTrue(anagram_finder(\"listen\", \"silent\"))\n self.assertTrue(anagram_finder(\"triangle\", \"integral\"))\n self.assertFalse(anagram_finder(\"apple\", \"pabble\"))", "def simple_disambiguation(images, senses, labels, image_column, verb_types):\n accuracy = {'motion': [0, 0], 'non_motion': [0, 0]}\n for _, image_row in enumerate(images.itertuples()):\n i_t = np.array(getattr(image_row, image_column))\n image_id = image_row.Index\n verbs = labels.query('image == @image_id')['lemma'].to_frame()\n\n for _, verb_row in enumerate(verbs.itertuples()):\n verb = verb_row.lemma\n filtered_senses = senses.query('lemma == @verb')\n # Cosine similarity between image i_t and every other sense s_t\n dot_prod = filtered_senses['e_combined'].apply(\n lambda s_t: -1 if np.all(i_t == None) else np.dot(i_t, s_t))\n s_hat = dot_prod.values.argmax()\n if np.max(dot_prod) == -1: # the image can't be represented\n continue\n pred_sense_id = filtered_senses.iloc[s_hat]['sense_num']\n sense_id = labels.query('image == @image_id and lemma == @verb')['sense_chosen'].iloc[0]\n\n # Accuracy statistics\n if verb in verb_types['motion']:\n if sense_id == pred_sense_id:\n accuracy['motion'][1] += 1\n else:\n accuracy['motion'][0] += 1\n elif verb in verb_types['non_motion']:\n if sense_id == pred_sense_id:\n accuracy['non_motion'][1] += 1\n else:\n accuracy['non_motion'][0] += 1\n else:\n raise ValueError('Unknown verb type')\n\n print('%s representation, sense accuracy:' % image_column)\n print('Motion verbs: %s' % ((accuracy['motion'][1] / (accuracy['motion'][0] + accuracy['motion'][1])) * 100))\n print('Non-motion verbs: %s' % ((accuracy['non_motion'][1] / (accuracy['non_motion'][0] + accuracy['non_motion'][1])) * 100))\n print('-')", "def analyse(self):\n pass", "def testabilities(self):\n for ability in WeaponAbility.typelist:\n a = WeaponAbility(ability)\n self.assert_(ability in str(a))\n self.assertTrue(isinstance(a.AC, 
int))\n self.assertTrue(isinstance(a.description(), str))", "def check_directionality_viable(self):\n\n direction_viable = True\n nose_cords, ear_left_cords, ear_right_cords = [], [], []\n for animal_name in self.animal_bp_dict.keys():\n for bp_cord in [\"X_bps\", \"Y_bps\"]:\n bp_list = self.animal_bp_dict[animal_name][bp_cord]\n for bp_name in bp_list:\n bp_name_components = bp_name.split(\"_\")\n bp_name_components = [x.lower() for x in bp_name_components]\n if \"nose\" in bp_name_components:\n nose_cords.append(bp_name)\n elif (\"ear\" in bp_name_components) and (\n \"left\" in bp_name_components\n ):\n ear_left_cords.append(bp_name)\n elif (\"ear\" in bp_name_components) and (\n \"right\" in bp_name_components\n ):\n ear_right_cords.append(bp_name)\n else:\n pass\n\n for cord in [nose_cords, ear_left_cords, ear_right_cords]:\n if len(cord) != len(self.animal_bp_dict.keys()) * 2:\n direction_viable = False\n\n if direction_viable:\n nose_cords = [\n nose_cords[i * 2 : (i + 1) * 2]\n for i in range((len(nose_cords) + 2 - 1) // 2)\n ]\n ear_left_cords = [\n ear_left_cords[i * 2 : (i + 1) * 2]\n for i in range((len(ear_left_cords) + 2 - 1) // 2)\n ]\n ear_right_cords = [\n ear_right_cords[i * 2 : (i + 1) * 2]\n for i in range((len(ear_right_cords) + 2 - 1) // 2)\n ]\n\n return direction_viable, nose_cords, ear_left_cords, ear_right_cords", "def test_basic_intra_format(self):\n buff_interactions = isambard.buff.find_intra_ampal(\n self.topo, self.ff.distance_cutoff)\n for _ in range(100):\n a, b = random.choice(buff_interactions)\n self.assertTrue(type(a) is isambard.ampal.Atom)\n self.assertTrue(type(b) is isambard.ampal.Atom)\n self.assertTrue(a != b)", "def test_unknown_identifier(self):\n # Lists are not supported and should cause an error\n with self.assertRaises(TypeError):\n avp.AVP([0, 3])", "def get_matching_animals():\n current_user = get_jwt_identity()\n\n if not current_user:\n print('uri=/login error=\"Missing user\"')\n return jsonify(message=\"Missing user\"), 400\n\n try:\n username = User.get_username_by_id(current_user)\n user_detail = UserDetail.get_printable_user_detail(username)\n dispositions = UserDetail.get_user_dispositions(username)\n animal_preference = AnimalClass.get_animal_class_by_name(Adopter.get_animal_preference(username))\n print('User detail {}'.format(user_detail))\n print('Dispositions {}'.format(dispositions))\n print('Animal preference {}'.format(animal_preference.animal_class))\n\n matching_animals = Animal.get_animals_by_type_and_disposition(animal_preference, dispositions)\n\n return jsonify(message='{}'.format(json.dumps(matching_animals))), 200\n except Exception as e:\n print(e)\n return jsonify(message='{}'.format(e)), 501", "def process_proposes(self):\n\n # Check which I'm still interested in, could be learned\n accepted = defaultdict(bool)\n for proposal in self.interaction_proposals:\n proposal: Tuple[Guest, str]\n error_prob = .05\n if random.random() < error_prob:\n accepted[proposal] = True\n else:\n if self.knowledge[(proposal[0].role, proposal[1])] >= 0:\n accepted[proposal] = True\n\n self.interaction_proposals = filter(lambda x: accepted[x], self.interaction_proposals)\n # self.interaction_proposals = filter(lambda x: False, self.interaction_proposals)\n\n # self.interaction_proposals = list(self.interaction_proposals)\n\n # Check which the other is still interested in, don't touch\n self.interaction_proposals = filter(lambda x: (self, x[1]) in x[0].interaction_proposals,\n self.interaction_proposals)\n # TODO maybe split this up?\n 
self.interaction_proposals = list(self.interaction_proposals)\n\n if len(self.interaction_proposals) > 0:\n # Choose a random interaction from the approved ones\n if len(self.interaction_proposals) > 1:\n print(self.unique_id, self.interaction_proposals)\n\n other: Guest\n interaction: str\n other, interaction = random.choice(self.interaction_proposals)\n # print(interaction)\n\n getattr(self.model, interaction)(self, other)\n other.interaction_proposals = []", "def asking_irving(actor, y, ctxt) :\n # current words have already been init'd since we're parsing\n res = ctxt.parser.run_parser(\"somelore\",\n ctxt.parser.transform_text_to_words(y),\n ctxt)\n if len(res) == 1 :\n desc = ctxt.world[Description(res[0][0].value)]\n if desc :\n ctxt.write(desc)\n else : # just in case, for debugging\n ctxt.write(\"\"\"Irving Q. Tep has nothing to say about that.\"\"\")\n elif len(res) > 1 :\n raise Ambiguous(AskingAbout(actor, \"Irving Q. Tep\", X), {X : [r[0].value for r in res]}, {X : \"somelore\"})\n else :\n res = ctxt.parser.run_parser(\"something\",\n ctxt.parser.transform_text_to_words(y),\n ctxt)\n if not res :\n ctxt.write(\"\"\"Irving Q. Tep has nothing to say about that.\"\"\")\n elif len(res) == 1 :\n ctxt.actionsystem.run_action(Examining(actor, res[0][0].value), ctxt)\n else :\n raise Ambiguous(Examining(actor, X), {X : [r[0].value for r in res]}, {X : \"something\"})\n raise ActionHandled()", "async def tapir(self):\n tapir_list = self.config.get('tapirs', [])\n tapir = tapir_list[random.randrange(len(tapir_list))]\n try:\n await self.bot.say(tapir)\n except:\n await self.bot.whisper(tapir)", "def determine_available_voice_types(sim_info: SimInfo) -> Tuple[CommonVoiceActorType]:\n from sims4communitylib.utils.sims.common_age_species_utils import CommonAgeSpeciesUtils\n from sims4communitylib.utils.sims.common_species_utils import CommonSpeciesUtils\n from sims4communitylib.utils.sims.common_age_utils import CommonAgeUtils\n if CommonAgeSpeciesUtils.is_teen_adult_or_elder_human(sim_info):\n result: Tuple[CommonVoiceActorType] = (\n CommonVoiceActorType.MUTE,\n CommonVoiceActorType.ADULT_HUMAN_AMBIGUOUS_1,\n CommonVoiceActorType.ADULT_HUMAN_FEMININE_1,\n CommonVoiceActorType.ADULT_HUMAN_FEMININE_2,\n CommonVoiceActorType.ADULT_HUMAN_MASCULINE_1,\n CommonVoiceActorType.ADULT_HUMAN_MASCULINE_2,\n CommonVoiceActorType.ADULT_HUMAN_MASCULINE_3,\n CommonVoiceActorType.KYLO_REN_1,\n CommonVoiceActorType.REY_1,\n CommonVoiceActorType.HONDO_OHNAKA_1,\n )\n elif CommonAgeSpeciesUtils.is_child_human(sim_info):\n result: Tuple[CommonVoiceActorType] = (\n CommonVoiceActorType.MUTE,\n CommonVoiceActorType.CHILD_HUMAN_AMBIGUOUS_1,\n CommonVoiceActorType.CHILD_HUMAN_AMBIGUOUS_2,\n CommonVoiceActorType.KYLO_REN_1,\n CommonVoiceActorType.REY_1,\n CommonVoiceActorType.HONDO_OHNAKA_1,\n )\n elif CommonAgeSpeciesUtils.is_toddler_human(sim_info):\n result: Tuple[CommonVoiceActorType] = (\n CommonVoiceActorType.MUTE,\n CommonVoiceActorType.TODDLER_HUMAN_AMBIGUOUS_1,\n )\n elif CommonSpeciesUtils.is_large_dog(sim_info) and CommonAgeUtils.is_teen_adult_or_elder(sim_info):\n result: Tuple[CommonVoiceActorType] = (\n CommonVoiceActorType.MUTE,\n CommonVoiceActorType.ADULT_DOG_AMBIGUOUS_1,\n CommonVoiceActorType.ADULT_DOG_AMBIGUOUS_2,\n CommonVoiceActorType.ADULT_DOG_AMBIGUOUS_3,\n CommonVoiceActorType.ADULT_DOG_AMBIGUOUS_4\n )\n elif CommonSpeciesUtils.is_large_dog(sim_info) and CommonAgeUtils.is_child(sim_info):\n result: Tuple[CommonVoiceActorType] = (\n CommonVoiceActorType.MUTE,\n 
CommonVoiceActorType.CHILD_DOG_AMBIGUOUS_1,\n )\n elif CommonSpeciesUtils.is_small_dog(sim_info) and CommonAgeUtils.is_teen_adult_or_elder(sim_info):\n result: Tuple[CommonVoiceActorType] = (\n CommonVoiceActorType.MUTE,\n CommonVoiceActorType.ADULT_DOG_AMBIGUOUS_1,\n CommonVoiceActorType.ADULT_DOG_AMBIGUOUS_2,\n CommonVoiceActorType.ADULT_DOG_AMBIGUOUS_3,\n CommonVoiceActorType.ADULT_DOG_AMBIGUOUS_4\n )\n elif CommonSpeciesUtils.is_small_dog(sim_info) and CommonAgeUtils.is_child(sim_info):\n result: Tuple[CommonVoiceActorType] = (\n CommonVoiceActorType.MUTE,\n CommonVoiceActorType.CHILD_DOG_AMBIGUOUS_1,\n )\n elif CommonSpeciesUtils.is_cat(sim_info) and CommonAgeUtils.is_teen_adult_or_elder(sim_info):\n result: Tuple[CommonVoiceActorType] = (\n CommonVoiceActorType.MUTE,\n CommonVoiceActorType.ADULT_CAT_AMBIGUOUS_1,\n CommonVoiceActorType.ADULT_CAT_AMBIGUOUS_2,\n )\n elif CommonSpeciesUtils.is_cat(sim_info) and CommonAgeUtils.is_child(sim_info):\n result: Tuple[CommonVoiceActorType] = (\n CommonVoiceActorType.MUTE,\n CommonVoiceActorType.CHILD_CAT_AMBIGUOUS_1,\n )\n elif CommonSpeciesUtils.is_fox(sim_info) and CommonAgeUtils.is_teen_adult_or_elder(sim_info):\n result: Tuple[CommonVoiceActorType] = (\n CommonVoiceActorType.MUTE,\n CommonVoiceActorType.ADULT_FOX_AMBIGUOUS_1,\n )\n else:\n result: Tuple[CommonVoiceActorType] = (\n CommonVoiceActorType.MUTE,\n )\n return result", "def test_occupancy_1(self):\n human_detector_inst = HumanDetector(\n find_humans_from_video_file_name='videos/occupancy_test_videos/occupancy1.mp4',\n use_pi_camera=False, open_display=False)\n self.assertEqual(human_detector_inst.perform_job(), None)\n human_centroid_dict = human_detector_inst.get_human_centroid_dict()\n self.assertEqual(len(human_centroid_dict), 9)\n self.assertEqual(human_centroid_dict[0].direction, Direction.ENTER)\n self.assertEqual(human_centroid_dict[1].direction, Direction.EXIT)\n self.assertEqual(human_centroid_dict[2].direction, Direction.EXIT)\n self.assertEqual(human_centroid_dict[3].direction, Direction.ENTER)\n self.assertEqual(human_centroid_dict[4].direction, Direction.ENTER)\n self.assertEqual(human_centroid_dict[5].direction, Direction.ENTER)\n self.assertEqual(human_centroid_dict[6].direction, Direction.ENTER)\n self.assertEqual(human_centroid_dict[7].direction, Direction.EXIT)\n self.assertEqual(human_centroid_dict[8].direction, Direction.ENTER)\n self.assertEqual(SendReceiveMessages().get_face_detected_count_locally(), 3)\n human_detector_inst.clean_up()\n self.__cleanup()", "def eat(self, jungle: Jungle):", "def test_which_animals(self, select_relationships, concept_is_species, \n filter_by_concept_type):\n # Set up mocks and test data\n parsed_query = Mock(name='parsed_query',\n text='which animals eat bugs',\n subject_name='animals',\n object_name='bugs',\n relationship_type_name='eat',\n relationship_number=3,\n relationship_negation=False)\n fact_query = FactQuery(parsed_query=parsed_query)\n\n mock_match_1 = Mock(name='match_1',\n subject=Mock(concept_name='subject_1'))\n mock_match_2 = Mock(name='match_2',\n subject=Mock(concept_name='subject_2'))\n select_relationships.return_value = [mock_match_1, mock_match_2]\n concept_is_species.return_value = False\n filter_by_concept_type.return_value = [mock_match_1, mock_match_2]\n\n # Make call\n results = fact_query._which_animal_query()\n\n # Verify results\n self.assertEqual(set(['subject_1', 'subject_2']), set(results))\n\n # Verify mocks\n select_relationships.assert_called_once_with(\n 'eat', object_name='bugs', 
relationship_number=3)\n\n call_args_list = concept_is_species.call_args_list\n self.assertEqual(2, len(call_args_list))\n self.assertEqual(call('bugs'), call_args_list[0])\n self.assertEqual(call('animals'), call_args_list[1])\n\n filter_by_concept_type.assert_called_once_with(\n [mock_match_1, mock_match_2], 'animals', relationship_attr='subject')", "def is_interesting(self):\n current_time = self.time_in_air\n if self.last_point_name == 'home':\n new_name = 'node 1'\n else:\n new_name = 'node {0}'.format(int(self.last_point_name.split()[1]) + 1)\n new_sense = self.lidars[0].radius if isinf(self.lidars[0].get_sense()) else self.lidars[0].get_sense()\n\n if abs(new_sense - self.last_check) >= 9 * self.lidars[0].radius / 10:\n self.slam.add_point(name=new_name, data={'angle': self.yaw,\n 'duration': self.get_duration(current_time=current_time),\n 'time': current_time})\n self.slam.add_edge(from_node=self.last_point_name, to_node=new_name, distance=self.distance)\n self.distance = 0\n self.last_point_name = new_name", "def _recognize_face(unknown_encoding, loaded_encodings):\n boolean_matches = face_recognition.compare_faces(\n loaded_encodings[\"encodings\"], unknown_encoding\n )\n votes = Counter(\n name\n for match, name in zip(boolean_matches, loaded_encodings[\"names\"])\n if match\n )\n if votes:\n return votes.most_common(1)[0][0]", "def set_to_default_female_voice(sim_info: SimInfo) -> None:\n from sims4communitylib.utils.sims.common_age_species_utils import CommonAgeSpeciesUtils\n from sims4communitylib.utils.sims.common_species_utils import CommonSpeciesUtils\n from sims4communitylib.utils.sims.common_age_utils import CommonAgeUtils\n if CommonAgeSpeciesUtils.is_teen_adult_or_elder_human(sim_info):\n CommonSimVoiceUtils.set_voice_actor(sim_info, CommonVoiceActorType.ADULT_HUMAN_FEMININE_1)\n elif CommonAgeSpeciesUtils.is_child_human(sim_info):\n CommonSimVoiceUtils.set_voice_actor(sim_info, CommonVoiceActorType.CHILD_HUMAN_AMBIGUOUS_1)\n elif CommonAgeSpeciesUtils.is_toddler_human(sim_info):\n CommonSimVoiceUtils.set_voice_actor(sim_info, CommonVoiceActorType.TODDLER_HUMAN_AMBIGUOUS_1)\n elif CommonSpeciesUtils.is_large_dog(sim_info) and CommonAgeUtils.is_teen_adult_or_elder(sim_info):\n CommonSimVoiceUtils.set_voice_actor(sim_info, CommonVoiceActorType.ADULT_DOG_AMBIGUOUS_1)\n elif CommonSpeciesUtils.is_large_dog(sim_info) and CommonAgeUtils.is_child(sim_info):\n CommonSimVoiceUtils.set_voice_actor(sim_info, CommonVoiceActorType.CHILD_DOG_AMBIGUOUS_1)\n elif CommonSpeciesUtils.is_small_dog(sim_info) and CommonAgeUtils.is_teen_adult_or_elder(sim_info):\n CommonSimVoiceUtils.set_voice_actor(sim_info, CommonVoiceActorType.ADULT_DOG_AMBIGUOUS_1)\n elif CommonSpeciesUtils.is_small_dog(sim_info) and CommonAgeUtils.is_child(sim_info):\n CommonSimVoiceUtils.set_voice_actor(sim_info, CommonVoiceActorType.CHILD_DOG_AMBIGUOUS_1)\n elif CommonSpeciesUtils.is_cat(sim_info) and CommonAgeUtils.is_teen_adult_or_elder(sim_info):\n CommonSimVoiceUtils.set_voice_actor(sim_info, CommonVoiceActorType.ADULT_CAT_AMBIGUOUS_1)\n elif CommonSpeciesUtils.is_cat(sim_info) and CommonAgeUtils.is_child(sim_info):\n CommonSimVoiceUtils.set_voice_actor(sim_info, CommonVoiceActorType.CHILD_CAT_AMBIGUOUS_1)\n elif CommonSpeciesUtils.is_fox(sim_info) and CommonAgeUtils.is_teen_adult_or_elder(sim_info):\n CommonSimVoiceUtils.set_voice_actor(sim_info, CommonVoiceActorType.ADULT_FOX_AMBIGUOUS_1)", "def detect_person(snap):\n pass", "def hit(self):", "def test_AKs_Properties(self):\n self.assertEqual(self.hand.premInd, 
1)\n self.assertEqual(self.hand.connectedInd, 1)\n self.assertEqual(self.hand.suitedInd, 1)", "def seallable(\n sequence,\n medials={\n 'j', 'w', 'jw', 'wj', 'i̯', 'u̯', 'i̯u̯', 'u̯i̯', 'iu', 'ui', 'y', 'ɥ', 'l',\n 'lj', 'lʲ', 'r', 'rj', 'rʲ', 'ʐ', 'ʑ', 'ʂ', 'ʂ'},\n vowels=VOWELS,\n tones=TONES,\n diacritics=DIACRITICS,\n stress=STRESS,\n cldf=True,\n unknown=REPLACEMENT,\n ):\n if not sequence:\n raise ValueError('empty sequence passed to function')\n if len(sequence) > 5:\n return len(sequence) * [unknown]\n\n cv = soundclass(sequence, model='cv', diacritics=diacritics, stress=stress, cldf=cldf)\n\n ini, med, nuc, cod, ton = 5 * [False]\n\n if 3 <= len(sequence) <= 5:\n # first element must be the initial\n ini = 'i' if cv[0] == 'C' else '?'\n # last element must be tone\n ton = 't' if cv[-1] == 'T' else '?'\n # medial and coda can be missing\n med, nuc, cod = 3 * [False]\n\n # scenario the sequence has 5 elements, all slots must be filled\n if len(sequence) == 5:\n med = 'm' if sequence[1] in medials else '?'\n cod = 'c' if cv[3] == 'C' else '?'\n nuc = 'n' if cv[2] == 'V' else '?'\n \n # scenario the sequence has four slots filled, one must be missing, either\n # coda or medial\n elif len(sequence) == 4:\n med = 'm' if sequence[1] in medials else False\n if not med:\n nuc = 'n' if cv[1] == 'V' else '?'\n cod = 'c' if cv[2] == 'C' else '?'\n else:\n nuc = 'n' if cv[2] == 'V' else '?'\n\n # scenario where the sequence has three slots filled, \n # case 1 : \"ma¹³\". The second token must be a vowel\n # case 2 : \"am¹³\". The first token must be a vowel\n elif len(sequence) == 3:\n if cv[1] == 'V':\n ini = 'i' if cv[0] == 'C' else '?'\n nuc = 'n'\n elif cv[0] == 'V':\n ini = False\n nuc = 'n'\n cod = 'c' if cv[1] == 'C' else '?'\n\n # scenario with two elements only, means that the first element should be a\n # consonant\n elif len(sequence) == 2:\n nuc = 'n' if cv[0] == 'V' else '?'\n ton = 't' if cv[1] == 'T' else '?'\n\n # if only one segment is given, it must be the vowel\n else:\n nuc = 'n' if cv[0] == 'V' else '?'\n\n return [s for s in [ini, med, nuc, cod, ton] if s]", "def _say_prudent(self, text):\n if datetime.now() - self.last_speech > timedelta(seconds=5):\n DetectionApp._say(text)\n self.last_speech = datetime.now()\n self._seen_faces.clear()", "def speak(self):\n print(\"meow!\")", "def detect_cuewords():\n\n # cuewords\n\n if t_word[:2] == 'ni':\n create_negation_frame()\n create_target_focus_scope()\n\n if t_word[:4] == 'kein':\n create_negation_frame()\n create_target_focus_scope()\n\n if t_word[:4] == 'nein':\n create_negation_frame()\n create_target_focus_scope()", "def test_occupancy_6(self):\n human_detector_inst = HumanDetector(\n find_humans_from_video_file_name='videos/occupancy_test_videos/occupancy6.mp4',\n use_pi_camera=False, open_display=False)\n self.assertEqual(human_detector_inst.perform_job(), None)\n human_centroid_dict = human_detector_inst.get_human_centroid_dict()\n self.assertEqual(len(human_centroid_dict), 12)\n self.assertEqual(human_centroid_dict[0].direction, Direction.ENTER)\n self.assertEqual(human_centroid_dict[1].direction, Direction.ENTER)\n self.assertEqual(human_centroid_dict[2].direction, Direction.ENTER)\n self.assertEqual(human_centroid_dict[3].direction, Direction.ENTER)\n self.assertEqual(human_centroid_dict[4].direction, Direction.ENTER)\n self.assertEqual(human_centroid_dict[5].direction, Direction.EXIT)\n self.assertEqual(human_centroid_dict[6].direction, Direction.ENTER)\n self.assertEqual(human_centroid_dict[7].direction, 
Direction.EXIT)\n self.assertEqual(human_centroid_dict[8].direction, Direction.EXIT)\n self.assertEqual(human_centroid_dict[9].direction, Direction.EXIT)\n self.assertEqual(human_centroid_dict[10].direction, Direction.EXIT)\n self.assertEqual(human_centroid_dict[11].direction, Direction.ENTER)\n self.assertEqual(SendReceiveMessages().get_face_detected_count_locally(), 2)\n human_detector_inst.clean_up()\n self.__cleanup()", "def _view_animal(self):\n print(repr(self.animals[self.park_location]))", "def rhymes(self,a,b):\r\n \r\n a=a.lower()\r\n b=b.lower()\r\n if(a in self._words): ##check if A is in the dict\r\n checkA=1\r\n soundA=self._pronun[a]\r\n lenA=len(soundA)\r\n #print(soundA)\r\n else :\r\n return False\r\n if(b in self._words): ##check if B is in dict\r\n checkB=1\r\n soundB=self._pronun[b]\r\n lenB=len(soundB)\r\n #print(soundB)\r\n else:\r\n return False\r\n \r\n if((checkA==1) and (checkB==1)): ##if both in dict then move ahead\r\n #print(lenA,lenB)\r\n \r\n for countA in range(lenA):\r\n if soundA[countA][0][0] not in ['A','E','I','O','U']:\r\n soundA[countA]=soundA[countA][1:]\r\n\r\n for countA in range(lenA):\r\n soundA[countA]=''.join(soundA[countA])\r\n \r\n # print(soundA)\r\n \r\n\r\n for countB in range(lenB):\r\n if soundB[countB][0][0] not in ['A','E','I','O','U']:\r\n soundB[countB]=soundB[countB][1:]\r\n\r\n for countB in range(lenB):\r\n soundB[countB]=''.join(soundB[countB])\r\n\r\n #print(soundB)\r\n \r\n else:\r\n return False\r\n\r\n rhyme_count=0\r\n \r\n for countA in range(lenA):\r\n for countB in range(lenB):\r\n if((soundA[countA].endswith(soundB[countB]))==True):\r\n #print('substring matched')\r\n rhyme_count=rhyme_count+1\r\n\r\n for countB in range(lenB):\r\n for countA in range(lenA):\r\n if((soundB[countB].endswith(soundA[countA]))==True):\r\n #print('substring matched')\r\n rhyme_count=rhyme_count+1\r\n \r\n if(rhyme_count>0):\r\n #print('True') \r\n return True\r\n else:\r\n # print('False')\r\n return False", "def learn_vowels(self, data=None):\n #pdb.set_trace()\n if not data:\n data = self.memory\n # find acoustic prototypes by clustering over stored acoustic reps\n raw_data = data.reshape(4 * len(self.stems), 2)\n ac_vowels, ac_spread = vq.kmeans(raw_data, 4)\n # find articulatory reps by comparing synthesized output vowels to\n # acoustic prototypes\n # start with candidate list of \"all possible\" articulations\n tmp_ar = N.empty((1, 3))\n rd = 0.0\n for hi in [0.0, 1.0]:\n for bk in [0.0, 1.0]:\n tmp_ar = N.vstack((tmp_ar, N.array([hi, bk, rd])))\n tmp_ar = tmp_ar[1:]\n while len(self.vowel_map) < 4:\n # no noise (since this shouldn't be running through the \"mouth\")\n tmp_ac = self.perceive(self.acoustify(tmp_ar))\n for v in ac_vowels:\n dists = N.sqrt(N.sum((v - tmp_ac)**2, axis=1))\n d = 0\n while True:\n if dists[d] < (2 * ac_spread):\n # found an articulatory prototype\n self.vowel_map[tuple(v)] = tmp_ar[d]\n # remove it from the candidate list\n tmp_ar = N.vstack((tmp_ar[:d], tmp_ar[d + 1:]))\n tmp_ac = N.vstack((tmp_ac[:d], tmp_ac[d + 1:]))\n break\n d += 1\n if d == len(dists):\n # take the best of the bad ones\n index = N.argmin(dists)\n self.vowel_map[tuple(v)] = tmp_ar[index]\n break\n self.vowel_spread = ac_spread\n return self.vowel_map", "def test_identify_tone_01():\n # Test all seven tones.\n # Test all vowels.\n # Test final ⁿ, up to two final and three initial consonants.\n # Test diacritic on first of two vowels.\n assert U.identify_tone('chhiong') == ('yīnpíng', '○')\n assert U.identify_tone('kui') == ('yīnpíng', 
'○')\n assert U.identify_tone('gîm') == ('yángpíng', '○')\n assert U.identify_tone('bî') == ('yángpíng', '○')\n assert U.identify_tone('bêng') == ('yángpíng', '○')\n assert U.identify_tone('ngớⁿ') == ('yīnshǎng', '●')\n assert U.identify_tone('óng') == ('yīnshǎng', '●')\n assert U.identify_tone('àm') == ('yīnqù', '●')\n assert U.identify_tone('pòan') == ('yīnqù', '●')\n assert U.identify_tone('sòe') == ('yīnqù', '●')\n assert U.identify_tone('bān') == ('yángqù', '●')\n assert U.identify_tone('iā') == ('yángqù', '●')\n assert U.identify_tone('ngơ̄ⁿ') == ('yángqù', '●')\n assert U.identify_tone('sek') == ('yīnrù', '●')\n assert U.identify_tone('khip') == ('yīnrù', '●')\n assert U.identify_tone('to̍k') == ('yángrù', '●')\n assert U.identify_tone('bu̍t') == ('yángrù', '●')", "def test_readers_remeber_spawned_spectra(self):\n pass", "def get_experiment_speaker_info(db_root):\n seen_speakers = ['VCTK-speaker-p225-female',\n 'VCTK-speaker-p226-male',\n 'VCTK-speaker-p227-male',\n 'VCTK-speaker-p228-female',\n 'VCTK-speaker-p229-female',\n 'VCTK-speaker-p230-female',\n 'VCTK-speaker-p231-female',\n 'VCTK-speaker-p232-male',\n 'VCTK-speaker-p233-female',\n 'VCTK-speaker-p234-female',\n 'VCTK-speaker-p236-female',\n 'VCTK-speaker-p237-male',\n 'VCTK-speaker-p238-female',\n 'VCTK-speaker-p239-female',\n 'VCTK-speaker-p240-female',\n 'VCTK-speaker-p241-male',\n 'VCTK-speaker-p243-male',\n 'VCTK-speaker-p244-female',\n 'VCTK-speaker-p245-male',\n 'VCTK-speaker-p246-male',\n 'VCTK-speaker-p247-male',\n 'VCTK-speaker-p248-female',\n 'VCTK-speaker-p249-female',\n 'VCTK-speaker-p250-female',\n 'VCTK-speaker-p251-male',\n 'VCTK-speaker-p252-male',\n 'VCTK-speaker-p253-female',\n 'VCTK-speaker-p254-male',\n 'VCTK-speaker-p255-male',\n 'VCTK-speaker-p256-male',\n 'VCTK-speaker-p257-female',\n 'VCTK-speaker-p258-male',\n 'VCTK-speaker-p259-male',\n 'VCTK-speaker-p260-male',\n 'VCTK-speaker-p261-female',\n 'VCTK-speaker-p262-female',\n 'VCTK-speaker-p263-male',\n 'VCTK-speaker-p264-female',\n 'VCTK-speaker-p265-female',\n 'VCTK-speaker-p266-female',\n 'VCTK-speaker-p267-female',\n 'VCTK-speaker-p268-female',\n 'VCTK-speaker-p269-female',\n 'VCTK-speaker-p270-male',\n 'VCTK-speaker-p271-male',\n 'VCTK-speaker-p272-male',\n 'VCTK-speaker-p273-male',\n 'VCTK-speaker-p274-male',\n 'VCTK-speaker-p275-male',\n 'VCTK-speaker-p276-female',\n 'VCTK-speaker-p277-female',\n 'VCTK-speaker-p278-male',\n 'VCTK-speaker-p279-male',\n 'VCTK-speaker-p280-female',\n 'VCTK-speaker-p281-male',\n 'VCTK-speaker-p282-female',\n 'VCTK-speaker-p283-female',\n 'VCTK-speaker-p284-male',\n 'VCTK-speaker-p285-male',\n 'VCTK-speaker-p286-male',\n 'VCTK-speaker-p287-male',\n 'VCTK-speaker-p288-female',\n 'VCTK-speaker-p292-male',\n 'VCTK-speaker-p293-female',\n 'VCTK-speaker-p294-female',\n 'VCTK-speaker-p295-female',\n 'VCTK-speaker-p297-female',\n 'VCTK-speaker-p298-male',\n 'VCTK-speaker-p299-female',\n 'VCTK-speaker-p300-female',\n 'VCTK-speaker-p301-female',\n 'VCTK-speaker-p302-male',\n 'VCTK-speaker-p303-female',\n 'VCTK-speaker-p304-male',\n 'VCTK-speaker-p305-female',\n 'VCTK-speaker-p306-female',\n 'VCTK-speaker-p307-female',\n 'VCTK-speaker-p308-female',\n 'VCTK-speaker-p310-female',\n 'VCTK-speaker-p311-male',\n 'VCTK-speaker-p312-female',\n 'VCTK-speaker-p313-female',\n 'VCTK-speaker-p314-female',\n 'VCTK-speaker-p316-male',\n 'VCTK-speaker-p317-female',\n 'VCTK-speaker-p318-female',\n 'VCTK-speaker-p323-female',\n 'VCTK-speaker-p326-male',\n 'VCTK-speaker-p329-female',\n 'VCTK-speaker-p330-female',\n 
'VCTK-speaker-p333-female',\n 'VCTK-speaker-p334-male',\n 'VCTK-speaker-p335-female',\n 'VCTK-speaker-p336-female',\n 'VCTK-speaker-p339-female',\n 'VCTK-speaker-p340-female',\n 'VCTK-speaker-p341-female',\n 'VCTK-speaker-p343-female',\n 'VCTK-speaker-p345-male',\n 'VCTK-speaker-p347-male',\n 'VCTK-speaker-p351-female',\n 'VCTK-speaker-p360-male',\n 'VCTK-speaker-p361-female',\n 'VCTK-speaker-p362-female',\n 'VCTK-speaker-p363-male',\n 'VCTK-speaker-p364-male',\n 'VCTK-speaker-p374-male',\n 'VCTK-speaker-p376-male']\n\n # speaker index list for training and validation\n n_speaker = len(seen_speakers)\n\n # take all speakers in train and validation!!!\n train_speakers = seen_speakers\n valid_speakers = seen_speakers\n print('number of VCTK speakers = %d' % n_speaker)\n\n sp2id = {sp: i for i, sp in enumerate(seen_speakers)}\n id2sp = {i: sp for i, sp in enumerate(seen_speakers)}\n\n return seen_speakers, sp2id, id2sp" ]
[ "0.5804902", "0.5754538", "0.56921846", "0.56177425", "0.55703396", "0.5568995", "0.5478271", "0.5457741", "0.5398982", "0.53391945", "0.5301369", "0.5275877", "0.52202857", "0.51787615", "0.5157603", "0.51495886", "0.5125665", "0.512152", "0.5121497", "0.5117309", "0.50895387", "0.50793", "0.50743616", "0.5070662", "0.5070132", "0.50671345", "0.50386196", "0.5024982", "0.5017773", "0.5013281", "0.5006908", "0.5005584", "0.5001473", "0.49984536", "0.49965826", "0.4992491", "0.49892846", "0.49873444", "0.4986239", "0.49857658", "0.49708155", "0.49646875", "0.49603015", "0.49530193", "0.49472153", "0.49367604", "0.4935603", "0.49347997", "0.49253425", "0.49239826", "0.49142653", "0.48921168", "0.48867512", "0.4881677", "0.48704728", "0.4869143", "0.4868113", "0.48612425", "0.48514634", "0.48448396", "0.4820341", "0.48202518", "0.48143968", "0.48079407", "0.4806066", "0.48037335", "0.47990027", "0.4794564", "0.47917742", "0.47842965", "0.47748002", "0.47745025", "0.47703668", "0.47633976", "0.47601813", "0.47457892", "0.47449282", "0.4743647", "0.47418684", "0.47403702", "0.4735069", "0.4734034", "0.47335082", "0.4728612", "0.47278753", "0.47234118", "0.47229722", "0.4718048", "0.47143376", "0.47123525", "0.47053427", "0.47037935", "0.46983692", "0.46964455", "0.46955836", "0.46919113", "0.46784016", "0.4672519", "0.46678752", "0.46664703" ]
0.5837831
0
auto change 'date' onchange of 'x_start_date'
def _compute_date_from_x_start_date(self):
    for ts_line in self:
        if ts_line.x_start_date:
            st_datetime = fields.Datetime.from_string(
                ts_line.x_start_date)
            # autocomplete date from start date
            st_date_tz = fields.Datetime.context_timestamp(
                self, st_datetime).date()
            ts_line.date = st_date_tz
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def onchange_start_date(self, start_date=False):\n if not start_date:\n return {}\n result = {'value': {'last_renovation_date': start_date}}\n return result", "def set_start_date(self, start_date):\n self.set_value_into_input_field(self.start_date_inputbox_locator, start_date)", "def set_start_date(self, date):\n pass", "def on_date_change(self):\n self.date = self.ui.calendarWidget.selectedDate()\n self.update_views()", "def start_date(self, start_date):\n self._start_date = start_date", "def date_start(self, date_start):\n\n self._date_start = date_start", "def _select_date_changed(self):\n self.model.edit_traits(view=View(\n UCustom('date'),\n buttons=['OK'],\n title=u'数据生成日期选择',\n kind='panel',\n ))", "def start_date(self, start_date):\n\n self._start_date = start_date", "def start_date(self, start_date):\n\n self._start_date = start_date", "def start_date(self, start_date):\n\n self._start_date = start_date", "def start_date(self, start_date):\n\n self._start_date = start_date", "def start_date(self, start_date):\n\n self._start_date = start_date", "def start_date(self, start_date):\n\n self._start_date = start_date", "def start_date(self, start_date):\n\n self._start_date = start_date", "def start_date(self, start_date):\n\n self._start_date = start_date", "def start_date(self, start_date):\n\n self._start_date = start_date", "def set_to_date(self):\n self.set_value_into_input_field(self.set_to_date_locator, self.get_current_date())", "def start_date(self, start_date):\n \n self._start_date = start_date", "def set_begin_date(self, begin_date):\n self.set_value_into_input_field(self.begin_date_inputbox_locator, begin_date)", "def model_start_date(self, model_start_date):\n\n self._model_start_date = model_start_date", "def onchange_rh_date(self):\n if self._context.get('load_from_rh'):\n self.onchange_rh_job()", "def set_billing_cycle_begin_date(self, begin_date):\n if begin_date == \"\":\n current_date = datetime.date.today()\n begin_date = current_date.replace(day=1)\n begin_date = begin_date.strftime(\"%m/%d/%Y\")\n self.set_value_into_input_field(self.billing_cycle_begin_date_inputbox_locator, begin_date)", "def date(self, value):\n self.date_value = value", "def set_from_date(self, date):\n self.set_value_into_input_field(self.set_from_date_locator, date)", "def dt_changed(self):\n self.dateTimeEdit_2.setMinimumDateTime(self.dateTimeEdit.dateTime())", "def setSelectedDate(self, data):\n # print('setSelectedDate ', data)\n self.currentDate = data", "def set_document_date(self, date):\n self.set_value_into_input_field(self.document_date_text_field_locator, date)", "def start_date(self, start_date):\n if start_date is None:\n raise ValueError(\"Invalid value for `start_date`, must not be `None`\")\n\n self._start_date = start_date", "def date(self, new_date):\n self._date.date = new_date", "def start_date(self, start_date):\n if start_date is None:\n start_date = datetime.now() - timedelta(days=365)\n\n self._start_date = dt_utils.parse_date(start_date)", "def set_adjustment_charge_begin_date(self, begin_date):\n self.set_value_into_input_field(self.adjustment_begin_date_locator, begin_date)", "def set_from_date_as_current_date(self):\n self.set_value_into_input_field(self.set_from_date_locator, self.get_current_date())", "def _set_dates(self, case_date):\n d1 = case_date - timedelta(days=self.interval)\n e1 = case_date\n\n start_date_mdy = datetime.strftime(d1, \"%m/%d/%Y\")\n end_date_mdy = datetime.strftime(case_date, \"%m/%d/%Y\")\n start_date = str(\n {\n \"valueAsString\": 
f\"{d1}-00-00-00\",\n \"lastSetTextBoxValue\": f\"{start_date_mdy}\",\n }\n )\n end_date = str(\n {\n \"valueAsString\": f\"{e1}-00-00-00\",\n \"lastSetTextBoxValue\": f\"{end_date_mdy}\",\n }\n )\n self.data[f\"{self.x}$startDate$dateInput\"] = start_date_mdy\n self.data[f\"{self.x}$endDate$dateInput\"] = end_date_mdy\n self.data[f\"{self.y}_startDate_dateInput_ClientState\"] = start_date\n self.data[f\"{self.y}_endDate_dateInput_ClientState\"] = end_date\n self.data[f\"{self.x}$btnSearch\"] = \"Search\"\n self.data[\n f\"{self.x}$radGridOpinions$ctl00$ctl03$ctl01$PageSizeComboBox\"\n ] = \"20\"", "def set_date(self, date):\n self.date = date", "def set_begin_date_for_search(self, begin_date):\n self.set_value_into_input_field(self.begin_date_locator, begin_date)", "def dateB(self):\r\n self.date = self.cal.selectedDate()\r\n self.lineEditWidgets[\"CUMPLEAÑOS\"].setText(\r\n self.date.toString(\"yyyy-MM-dd\"))", "def showSelectedDate(self):\n pass", "def set_date(self, date):\n self.date = date\n return", "def _date(self, _date):\n\n self.__date = _date", "def _date(self, _date):\n\n self.__date = _date", "def set_date(self, date):\n self.data['date'] = date", "def setBaseDate(self, date=None):\n if date is None:\n date = self.oggi\n self.baseDate = QDate(date.year(), date.month(), 1)", "def set_bulk_add_begin_date(self, begin_date):\n if begin_date == \"\":\n begin_date = self.get_date(current_date=True)\n self.set_value_into_input_field(self.bulk_add_begin_date_inputbox_locator, begin_date)\n global bulk_add_begin_date\n bulk_add_begin_date = datetime.datetime.strptime(begin_date, \"%m/%d/%Y\")\n return begin_date", "def startdate_param(self):\n return self.startdate_display", "def set_charge_begin_date(self, charge_begin_date, first_day_of_last_month):\n self.current_charge_begin_date = charge_begin_date\n if first_day_of_last_month is True:\n if charge_begin_date == \"\":\n self.first_day_of_previous_month = self.get_date(first_day_of_last_month=True)\n charge_begin_date = self.first_day_of_previous_month\n else:\n if charge_begin_date == \"\":\n charge_begin_date = self.get_date(current_date=True)\n self.current_charge_begin_date = datetime.datetime.strptime(charge_begin_date, \"%m/%d/%Y\")\n self.set_value_in_grid_column(self.charges_grid_div_id, self.charge_begin_date_column_name, charge_begin_date, True)\n page_header_element = self.wait().until(EC.element_to_be_clickable(self.page_header_locator), 'page header locator not found before specified time out')\n page_header_element.click()", "def date(self, date):\n self.value = date.strftime(\"%Y-%m-%d\") if date else \"\"", "def start1(self): \n return self.ddmmyyyy(self.rowTime.start)", "def set_statement_begin_date(self, begin_date):\n begin_date_to_set = None\n if begin_date != \"\":\n begin_date_to_set = begin_date\n else:\n self.first_day_of_previous_month = self.get_date(first_day_of_last_month=True)\n begin_date_to_set = self.first_day_of_previous_month\n self.set_value_into_input_field(self.statement_begin_date_locator, begin_date_to_set)\n return begin_date_to_set", "def setCurrentDate(self, date, dbConnection):\n if type(date) == QtCore.QDate:\n date = QtCore.QDate.toPyDate(date)\n date = datetime.datetime.combine(date, datetime.datetime.min.time())\n date = timehelper.localizeutc(date)\n if self.currentDate == date:\n pass\n else:\n self.currentDate = date\n self.currentDateEntries = self.getEntriesforDate(date, dbConnection)\n self.labelingStatusForCurrentDate = LabelingStatusOfDate(self.currentDate, 
self.currentDateEntries)\n logging.info('currently selected date: {}'.format(self.currentDate))", "def pickDate(self,event=None):\r\n curLine = self.missingDates.getSelection() # Returns a string because missingDates is a listbox control, not a treeview\r\n# print(curLine)\r\n# newData=self.fetchJournalData(curLine)\r\n# self.setData(newData)\r\n self.date.setDateText(curLine)", "def update(self, date):\r\n self.date = date", "def start_date(self, start_date):\n if self.local_vars_configuration.client_side_validation and start_date is None: # noqa: E501\n raise ValueError(\"Invalid value for `start_date`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n start_date is not None and len(start_date) < 1):\n raise ValueError(\"Invalid value for `start_date`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._start_date = start_date", "def date(self, date):\n self._date = date", "def setDate(self, p_int, p_int_1, p_int_2): # real signature unknown; restored from __doc__\r\n return False", "def start_date(self):\n return self.__start_date", "def date(self):", "def begin_date(self, value):\n\n if not isinstance(value, datetime):\n raise TypeError(_pretty_message(\n '''\n begin_date must be an instance of datetime.datetime, not %s\n ''',\n _type_name(value)\n ))\n\n self._begin_date = value", "def starting_date(self):\n return datetime.date(2016, 1, 4)", "def _get_start_date(self):\n today = datetimedate.today()\n if self.start == 'week':\n start_date = today - timedelta(days=today.weekday())\n elif self.start == 'month':\n start_date = today.replace(day=1)\n elif self.start == 'quarter':\n quarter = math.ceil(today.month / 3)\n start_date = datetimedate(\n today.year,\n ((quarter - 1) * 3) + 1,\n 1\n )\n elif self.start == 'year':\n start_date = datetimedate(today.year, 1, 1)\n elif self.start == 'all':\n start_date = datetimedate(2010, 1, 1)\n else:\n try:\n start_date = datetime.strptime(self.start, \"%Y-%m-%d\").date()\n except Exception as e:\n raise ParseError(\"start argument not valid\")\n\n self.start_date = start_date", "def adjust_start_and_end_dates(self):\n if self.start_date < self.install_date:\n self.start_date = self.install_date\n log.info(\"Adjusting start date to {}.\".format(self.start_date))\n\n today = datetime.today().date()\n if self.end_date > today:\n self.end_date = today\n log.info(\"Adjusting end date to {}\".format(self.end_date))\n\n if self.start_date > self.end_date:\n self.end_date = self.start_date + timedelta(days=1)\n log.info(\"Adjusting end date to {}\".format(self.end_date))", "def rating_start_date(self, rating_start_date):\n\n self._rating_start_date = rating_start_date", "def computed_startdate(self):\n return self.startdate", "def _onchange_date_from(self):\n\t\tdate_from = self.date_from\n\t\tdate_to = self.date_to\n\t\tself.compute_valid_leaves_for_employee(date_from, date_to)\n\n\t\t# policy_id = self.env['leaves.policy'].sudo().search(\n\t\t# \t[('leave_type', '=', self.holiday_status_id.id), ('company_id', '=', self.env.user.company_id.id)])\n\t\t# if date_from and not date_to:\n\t\t# \tdate_to_with_delta = fields.Datetime.from_string(date_from) + timedelta(hours=8)\n\t\t# \tself.date_to = str(date_to_with_delta)\n\t\t# \tnumber_of_day = (datetime.strptime(self.date_to, DEFAULT_SERVER_DATETIME_FORMAT) - datetime.strptime(date_from, DEFAULT_SERVER_DATETIME_FORMAT)).total_seconds()/(24*3600)\n\t\t# \tself.number_of_days_temp = number_of_day\n\t\t# # Compute and update the number of 
days\n\t\t# if (date_to and date_from) and (date_from <= date_to):\n\t\t# \tif policy_id:\n\t\t# \t\tfor val in policy_id:\n\t\t# \t\t\tnumber_of_days = 0\n\t\t# \t\t\tif val.weekends_leave_period == 'dont_count':\n\t\t# \t\t\t\tnum_days = self._get_number_of_days(date_from, date_to, self.employee_id.id)\n\t\t# \t\t\t\tdate_to1 = datetime.strptime(date_to, '%Y-%m-%d %H:%M:%S')\n\t\t# \t\t\t\tdate_from1 = datetime.strptime(date_from, '%Y-%m-%d %H:%M:%S')\n\t\t#\n\t\t# \t\t\t\t# Logic of Public Holidays when week offs count as holidays is True 2019-11-19\n\t\t# \t\t\t\temp_shift = self.employee_id.resource_calendar_ids\n\t\t# \t\t\t\tglobal_leaves = emp_shift.global_leave_ids\n\t\t# \t\t\t\t# List to store the global leaves\n\t\t# \t\t\t\tpublic_holidays = []\n\t\t# \t\t\t\tfor holiday in global_leaves:\n\t\t# \t\t\t\t\tpublic_holidays.append((holiday.date_from, holiday.date_to))\n\t\t#\n\t\t# \t\t\t\t# Public holidays between leave period\n\t\t# \t\t\t\tleave_period_dates = []\n\t\t# \t\t\t\tstart_date = date_from1.date()\n\t\t# \t\t\t\tend_date = date_to1.date()\n\t\t# \t\t\t\tdelta = end_date - start_date\n\t\t# \t\t\t\tfor i in range(delta.days + 1):\n\t\t# \t\t\t\t\tday = start_date + timedelta(days=i)\n\t\t# \t\t\t\t\tleave_period_dates.append(day)\n\t\t# \t\t\t\tcount = 0\n\t\t# \t\t\t\tfor date in public_holidays:\n\t\t# \t\t\t\t\tif datetime.strptime(date[0], '%Y-%m-%d %H:%M:%S').date() in leave_period_dates:\n\t\t# \t\t\t\t\t\tcount += 1\n\t\t# \t\t\t# End of Public Holidays logic\n\t\t#\n\t\t# \t\t\t\tself.number_of_days_temp = num_days - count\n\t\t# \t\t\telse:\n\t\t# \t\t\t\tnumber_of_days = self._get_number_of_days(date_from, date_to, self.employee_id.id)\n\t\t# \t\t\t\tdate_to1 = datetime.strptime(date_to, '%Y-%m-%d %H:%M:%S')\n\t\t# \t\t\t\tdate_from1 = datetime.strptime(date_from, '%Y-%m-%d %H:%M:%S')\n\t\t# \t\t\t\tif val.dur_full and not val.dur_half:\n\t\t# \t\t\t\t\ttotal_days = (date_to1 - date_from1).days\n\t\t# \t\t\t\telse:\n\t\t# \t\t\t\t\ttotal_seconds = (date_to1 - date_from1).seconds\n\t\t# \t\t\t\t\ttotal_days = total_seconds / (24 * 3600)\n\t\t#\n\t\t# \t\t\t\tweek_offs = total_days - number_of_days\n\t\t# \t\t\t\tself.number_of_days_temp = number_of_days + week_offs\n\t\t# \telse:\n\t\t# \t\t# self.number_of_days_temp = self._get_number_of_days(date_from, date_to, self.employee_id.id)\n\t\t# \t\tnumber_of_day = (datetime.strptime(self.date_to, DEFAULT_SERVER_DATETIME_FORMAT) - datetime.strptime(\n\t\t# \t\t\tdate_from, DEFAULT_SERVER_DATETIME_FORMAT)).total_seconds() / (24 * 3600)\n\t\t# \t\tself.number_of_days_temp = number_of_day\n\t\t#\n\t\t# elif (date_to and date_from) and (date_from > date_to):\n\t\t# \traise ValidationError(\"From Date cannot be greater then To Date\")\n\t\t# else:\n\t\t# \tself.number_of_days_temp = 0", "def model_start_date(self):\n return self._model_start_date", "def start_date(self, start_date: str):\n if start_date is None:\n raise ValueError(\"Invalid value for `start_date`, must not be `None`\") # noqa: E501\n\n self._start_date = start_date", "def setEvaluationDate(cell):\n global _qToday\n \n _qToday = toDate(cell.value)\n if not to_date:\n _qToday = Settings.instance().getEvaluationDate()\n else:\n Settings.instance().setEvaluationDate(_qToday)\n \n return _qToday.ISO()", "def on_date(self, date):\n print 'This is an empty on_date(date={}) function.\\nThe user must override this.'.format(date)\n return self", "def get_start_date(self):\n return \"%d%02d\" % (self.year, self.term)", "def set_add_dispute_date(self, date):\n 
self.set_value_into_input_field(self.add_dispute_date_inputbox_locator, date)", "def start_date(self) -> str:\n return pulumi.get(self, \"start_date\")", "def start_date(self) -> str:\n return pulumi.get(self, \"start_date\")", "def date(self, date):\n\n self._date = date", "def date(self, date):\n\n self._date = date", "def date(self, date):\n\n self._date = date", "def date(self, date):\n\n self._date = date", "def date(self, date):\n\n self._date = date", "def selected_date(self, selected_date):\n\n self._selected_date = selected_date", "def start_date(self):\n return self._start_date", "def start_date(self):\n return self._start_date", "def start_date(self):\n return self._start_date", "def start_date(self):\n return self._start_date", "def change_date(self, date):\n self.date = date\n relative_url = \"https://www.sevenrooms.com/manager/twelvewest/reservations/day/\" + date.strftime(\"%m-%d-20%y\")\n self.driver.get(relative_url)\n self.update_html()", "def dep_date(self, dep_date):\n\n self._dep_date = dep_date", "def _increment_date_data(klass, series, date_data):\n\n # delta is the timedelta in between events\n delta = timedelta(days=7 * series.every)\n date_data['start_date'] = date_data['start_date'] + delta\n date_data['end_date'] = date_data['end_date'] + delta", "def start_date(self) -> str:\n return self._start_date", "def start_date(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"start_date\")", "def set_non_recurring_charge_begin_date(self, begin_date):\n if begin_date is None:\n statement_summary_begin_date_element = self.wait().until(EC.presence_of_element_located(self.statement_details_begin_date_locator), 'statement details begin date locator not found before specified time out')\n non_recurring_charge_begin_date = str(statement_summary_begin_date_element.text)\n elif begin_date == \"\":\n current_date = datetime.date.today()\n non_recurring_charge_begin_date = \"%d/%d/%d\" % (current_date.month, current_date.day, current_date.year)\n else:\n non_recurring_charge_begin_date = begin_date\n self.set_value_into_input_field(self.non_recurring_charge_begin_date_locator, non_recurring_charge_begin_date)\n return non_recurring_charge_begin_date", "def set_datetime(self, date):\n self.date = date", "def init_date( self ) -> datetime:\n return datetime( 2011 ,2 ,1 )", "def set_date(self, date):\n self.date = self.date_to_local(date)\n # ephem deals only in UTC\n self.site.date = ephem.Date(self.date_to_utc(self.date))", "def onchange_date(self, cr, user, ids, date, context=None):\n res = {}\n if context is None:\n context = {}\n period_pool = self.pool.get('account.period')\n ctx = dict(context, account_period_prefer_normal=True)\n pids = period_pool.find(cr, user, date, context=ctx)\n if pids:\n res.update({\n 'period_id':pids[0]\n })\n context.update({\n 'period_id':pids[0]\n })\n return {\n 'value':res,\n 'context':context,\n }", "def _fill_date(self):\n if not self.date['year']:\n self.date['year'] = self.DEFAULT_DATE['year']\n if not self.date['month']:\n self.date['month'] = self.DEFAULT_DATE['month']\n if not self.date['day']:\n self.date['day'] = self.DEFAULT_DATE['day']", "def append_cvr_start_date(soup: BeautifulSoup, row: Restaurant) -> Restaurant:\n start_date_elem = soup.find(\n 'div', attrs={'class': 'Help-stamdata-data-startdato'})\n if start_date_elem:\n start_date_elem = start_date_elem.parent.parent.parent\n date = datetime.strptime(\n list(start_date_elem.children)[3].text.strip(),\n '%d.%m.%Y'\n )\n row.start_date = date\n print(f'date: 
{row.start_date}')\n else:\n row.start_date = None\n\n return row", "def interview_date_default(self, interview_date_default):\n\n self._interview_date_default = interview_date_default", "def from_date(self, value: date):\n self._from_date = value\n self._dao.from_date = value", "def startdate_display(self):\n if self.startdate:\n return self.startdate.strftime(self.format)", "def get_start_date(self, req, milestone):\n\n if milestone.start:\n return milestone.start.date()\n elif 'approx_start_date' in req.args:\n return datetime.strptime(req.args['approx_start_date'], '%Y-%m-%d').date() + timedelta(days=1)", "def coverage_start_date(self, coverage_start_date):\n\n self._coverage_start_date = coverage_start_date", "def coverage_start_date(self, coverage_start_date):\n\n self._coverage_start_date = coverage_start_date", "def start(self, start):\n try:\n self._set('start', Timestamp.to_datetime(start))\n except (TypeError, ValueError) as e:\n raise ValidationError(e)" ]
[ "0.7639792", "0.7559864", "0.733876", "0.701777", "0.7012194", "0.6946314", "0.6936835", "0.68256146", "0.68256146", "0.68256146", "0.68256146", "0.68256146", "0.68256146", "0.68256146", "0.68256146", "0.68256146", "0.67880905", "0.6787431", "0.67594075", "0.67241603", "0.6661433", "0.65644145", "0.6554636", "0.65232176", "0.64129144", "0.6408936", "0.63733834", "0.63598675", "0.63162917", "0.62854934", "0.6282772", "0.62826294", "0.62752056", "0.6207757", "0.6203093", "0.61288863", "0.61079454", "0.60912186", "0.60871744", "0.60871744", "0.6084931", "0.60649735", "0.60535246", "0.60518837", "0.6027577", "0.6026937", "0.60049844", "0.6001844", "0.5993319", "0.59643793", "0.592539", "0.5856313", "0.58202356", "0.5818916", "0.5816196", "0.5802206", "0.5799888", "0.5786358", "0.5776875", "0.5765798", "0.5756451", "0.5692194", "0.56909436", "0.56901264", "0.56871265", "0.5685391", "0.567559", "0.56681514", "0.56680906", "0.5628376", "0.5628376", "0.5626323", "0.5626323", "0.5626323", "0.5626323", "0.5626323", "0.56145185", "0.561384", "0.561384", "0.561384", "0.561384", "0.5612152", "0.56021446", "0.55897933", "0.5584028", "0.5579915", "0.5574602", "0.55705464", "0.55527556", "0.553829", "0.5513117", "0.5504566", "0.5503907", "0.55023223", "0.5494885", "0.5480769", "0.5476707", "0.54700065", "0.54700065", "0.54699665" ]
0.7176086
3
auto calculate 'hours' onchange of 'x_start_date' or 'x_end_date'
def _compute_duration(self): diff_float = 0 for ts_line in self: if ts_line.x_start_date: st_datetime = fields.Datetime.from_string( ts_line.x_start_date) # autocomplete date from start date st_date_tz = fields.Datetime.context_timestamp( self, st_datetime).date() ts_line.date = st_date_tz # autocomplete name from start date st_datetime_tz = fields.Datetime.context_timestamp( self, st_datetime) string_st_dt_tz = fields.Datetime.to_string(st_datetime_tz) ts_line.name = ts_line.user_id.name + '/' + string_st_dt_tz en_datetime = fields.Datetime.from_string( ts_line.x_end_date) diff = en_datetime - st_datetime if(time(1, 00) <= st_datetime.time() <= time(5, 00)): if(time(6, 00) <= en_datetime.time() <= time(10, 00)): # del 1 hour for breaking lunch diff_float = round(diff.total_seconds() / 3600.0, 2)-1 else: diff_float = round(diff.total_seconds() / 3600.0, 2) ts_line.unit_amount = diff_float
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_number_of_hours(self):\n if self.date_to:\n DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n from_dt = datetime.strptime(self.date_from, DATETIME_FORMAT)\n to_dt = datetime.strptime(self.date_to, DATETIME_FORMAT)\n timedelta = to_dt - from_dt\n diff_day =(float(timedelta.seconds) / 3600) - self.break_hour\n self.number_of_hours_temp = diff_day", "def <start>/<end>(<start>/<end>)\ndef calc_temps(start_date, end_date):", "def hours(input=None):\n return get(input).hours", "def _check_hours(self):\n for record in self:\n if record.start_hour or record.end_hour:\n if int(record.start_hour) == int(record.end_hour):\n raise ValidationError(_('Please enter different Start Hours and End Hours!'))\n if int(record.start_hour) > int(record.end_hour):\n raise ValidationError(_('Start hours can not be greater than end hours for the day.'))", "def get_hours(self, date = \"\"):\n\n if date == \"\":\n DATE = datetime.today()\n else:\n year, month, day = date.split('-')\n DATE = datetime(int(year), int(month), int(day))\n\n s = requests.get(\"https://api.wdpro.disney.go.com/facility-service/schedules/{}?date={}-{}-{}\".format(self.__id, DATE.year, self.__formatDate(str(DATE.month)), self.__formatDate(str(DATE.day))), headers=getHeaders())\n data = json.loads(s.content)\n\n operating_hours_start = None\n operating_hours_end = None\n extra_hours_start = None\n extra_hours_end = None\n\n try:\n for i in range(len(data['schedules'])):\n if data['schedules'][i]['type'] == 'Operating':\n operating_hours_start = datetime(DATE.year, DATE.month, DATE.day, int(data['schedules'][i]['startTime'][0:2]), int(data['schedules'][i]['startTime'][3:5]))\n if int(data['schedules'][i]['endTime'][0:2]) >= 0 and int(data['schedules'][i]['endTime'][0:2]) <= 7:\n DATETEMP = DATE + timedelta(days=1)\n operating_hours_end = datetime(DATETEMP.year, DATETEMP.month, DATETEMP.day, int(data['schedules'][i]['endTime'][0:2]), int(data['schedules'][i]['endTime'][3:5]))\n else:\n operating_hours_end = datetime(DATE.year, DATE.month, DATE.day, int(data['schedules'][i]['endTime'][0:2]), int(data['schedules'][i]['endTime'][3:5]))\n\n if data['schedules'][i]['type'] == \"Special Ticketed Event\":\n extra_hours_start = datetime(DATE.year, DATE.month, DATE.day, int(data['schedules'][i]['startTime'][0:2]), int(data['schedules'][i]['startTime'][3:5]))\n if int(data['schedules'][i]['endTime'][0:2]) >= 0 and int(data['schedules'][i]['endTime'][0:2]) <= 7:\n DATETEMP = DATE + timedelta(days=1)\n extra_hours_end = datetime(DATETEMP.year, DATETEMP.month, DATETEMP.day, int(data['schedules'][i]['endTime'][0:2]), int(data['schedules'][i]['endTime'][3:5]))\n else:\n operating_hours_end = datetime(DATE.year, DATE.month, DATE.day, int(data['schedules'][i]['endTime'][0:2]), int(data['schedules'][i]['endTime'][3:5]))\n\n except KeyError:\n pass\n return operating_hours_start, operating_hours_end, extra_hours_start, extra_hours_end", "def test_start_end_hour():\n # sh = None\n # eh = None\n # data = None\n # result = makesky.start_end_hour(sh, eh, data)\n pass", "def interval_hours(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"interval_hours\")", "def t_conv_func(t_end,t_start,input_type='datetime',output_units='hours'):\n if input_type == \"datetime\":\n diff = (t_end-t_start).total_seconds ()\n elif input_type == \"seconds\":\n diff = t_end-t_start\n else:\n raise NotImplementedError\n\n if output_units == 'seconds':\n return diff\n elif output_units == 'minutes':\n return diff/60.0\n elif output_units == 'hours':\n return diff/3600.0\n 
else:\n raise NotImplementedError", "def _onchange_date_from(self):\n\t\tdate_from = self.date_from\n\t\tdate_to = self.date_to\n\t\tself.compute_valid_leaves_for_employee(date_from, date_to)\n\n\t\t# policy_id = self.env['leaves.policy'].sudo().search(\n\t\t# \t[('leave_type', '=', self.holiday_status_id.id), ('company_id', '=', self.env.user.company_id.id)])\n\t\t# if date_from and not date_to:\n\t\t# \tdate_to_with_delta = fields.Datetime.from_string(date_from) + timedelta(hours=8)\n\t\t# \tself.date_to = str(date_to_with_delta)\n\t\t# \tnumber_of_day = (datetime.strptime(self.date_to, DEFAULT_SERVER_DATETIME_FORMAT) - datetime.strptime(date_from, DEFAULT_SERVER_DATETIME_FORMAT)).total_seconds()/(24*3600)\n\t\t# \tself.number_of_days_temp = number_of_day\n\t\t# # Compute and update the number of days\n\t\t# if (date_to and date_from) and (date_from <= date_to):\n\t\t# \tif policy_id:\n\t\t# \t\tfor val in policy_id:\n\t\t# \t\t\tnumber_of_days = 0\n\t\t# \t\t\tif val.weekends_leave_period == 'dont_count':\n\t\t# \t\t\t\tnum_days = self._get_number_of_days(date_from, date_to, self.employee_id.id)\n\t\t# \t\t\t\tdate_to1 = datetime.strptime(date_to, '%Y-%m-%d %H:%M:%S')\n\t\t# \t\t\t\tdate_from1 = datetime.strptime(date_from, '%Y-%m-%d %H:%M:%S')\n\t\t#\n\t\t# \t\t\t\t# Logic of Public Holidays when week offs count as holidays is True 2019-11-19\n\t\t# \t\t\t\temp_shift = self.employee_id.resource_calendar_ids\n\t\t# \t\t\t\tglobal_leaves = emp_shift.global_leave_ids\n\t\t# \t\t\t\t# List to store the global leaves\n\t\t# \t\t\t\tpublic_holidays = []\n\t\t# \t\t\t\tfor holiday in global_leaves:\n\t\t# \t\t\t\t\tpublic_holidays.append((holiday.date_from, holiday.date_to))\n\t\t#\n\t\t# \t\t\t\t# Public holidays between leave period\n\t\t# \t\t\t\tleave_period_dates = []\n\t\t# \t\t\t\tstart_date = date_from1.date()\n\t\t# \t\t\t\tend_date = date_to1.date()\n\t\t# \t\t\t\tdelta = end_date - start_date\n\t\t# \t\t\t\tfor i in range(delta.days + 1):\n\t\t# \t\t\t\t\tday = start_date + timedelta(days=i)\n\t\t# \t\t\t\t\tleave_period_dates.append(day)\n\t\t# \t\t\t\tcount = 0\n\t\t# \t\t\t\tfor date in public_holidays:\n\t\t# \t\t\t\t\tif datetime.strptime(date[0], '%Y-%m-%d %H:%M:%S').date() in leave_period_dates:\n\t\t# \t\t\t\t\t\tcount += 1\n\t\t# \t\t\t# End of Public Holidays logic\n\t\t#\n\t\t# \t\t\t\tself.number_of_days_temp = num_days - count\n\t\t# \t\t\telse:\n\t\t# \t\t\t\tnumber_of_days = self._get_number_of_days(date_from, date_to, self.employee_id.id)\n\t\t# \t\t\t\tdate_to1 = datetime.strptime(date_to, '%Y-%m-%d %H:%M:%S')\n\t\t# \t\t\t\tdate_from1 = datetime.strptime(date_from, '%Y-%m-%d %H:%M:%S')\n\t\t# \t\t\t\tif val.dur_full and not val.dur_half:\n\t\t# \t\t\t\t\ttotal_days = (date_to1 - date_from1).days\n\t\t# \t\t\t\telse:\n\t\t# \t\t\t\t\ttotal_seconds = (date_to1 - date_from1).seconds\n\t\t# \t\t\t\t\ttotal_days = total_seconds / (24 * 3600)\n\t\t#\n\t\t# \t\t\t\tweek_offs = total_days - number_of_days\n\t\t# \t\t\t\tself.number_of_days_temp = number_of_days + week_offs\n\t\t# \telse:\n\t\t# \t\t# self.number_of_days_temp = self._get_number_of_days(date_from, date_to, self.employee_id.id)\n\t\t# \t\tnumber_of_day = (datetime.strptime(self.date_to, DEFAULT_SERVER_DATETIME_FORMAT) - datetime.strptime(\n\t\t# \t\t\tdate_from, DEFAULT_SERVER_DATETIME_FORMAT)).total_seconds() / (24 * 3600)\n\t\t# \t\tself.number_of_days_temp = number_of_day\n\t\t#\n\t\t# elif (date_to and date_from) and (date_from > date_to):\n\t\t# \traise ValidationError(\"From Date cannot be greater then 
To Date\")\n\t\t# else:\n\t\t# \tself.number_of_days_temp = 0", "def get_time_delta_in_hours(start, end):\n dhour = end.hour - start.hour\n dmin = end.minute - start.minute\n dsec = end.second - start.second\n dtime = timedelta(hours=dhour, minutes=dmin, seconds=dsec) # NOTE rounds to nearest second\n # print start, end, dtime\n return float(dtime.seconds) / (60*60)", "def _get_number_of_hours(self, date_from, date_to, istirahat):\n\n DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n from_dt = datetime.strptime(date_from, DATETIME_FORMAT)\n to_dt = datetime.strptime(date_to, DATETIME_FORMAT)\n timedelta = to_dt - from_dt\n diff_day =(float(timedelta.seconds) / 3600) - istirahat\n return diff_day", "def get_hours_by_weekday(self, cr, uid, tpl_id, day_no, context=None):\n\n delta = timedelta(seconds=0)\n tpl = self.browse(cr, uid, tpl_id, context=context)\n for worktime in tpl.worktime_ids:\n if int(worktime.dayofweek) != day_no:\n continue\n\n fromHour, fromSep, fromMin = worktime.hour_from.partition(':')\n toHour, toSep, toMin = worktime.hour_to.partition(':')\n if len(fromSep) == 0 or len(toSep) == 0:\n raise orm.except_orm(\n 'Invalid Data', 'Format of working hours is incorrect')\n\n delta += (\n datetime.strptime(toHour + ':' + toMin, '%H:%M') -\n datetime.strptime(fromHour + ':' + fromMin, '%H:%M')\n )\n\n return float(delta.seconds / 60) / 60.0", "def hours(self):\n return self.config['hours']", "def tradeHours(self, context):\n raise NotImplementedError", "def hourly(self, start_time: str = \"now\", end_time: Optional[str] = None,\n fields: List[str] = list()) -> dict:\n end_time = end_time or str(pendulum.parse(start_time).add(hours=108))\n query = {\n \"start_time\": start_time,\n \"end_time\": end_time,\n \"fields\": fields or self.fields\n }\n return self.call(\"weather/forecast/hourly\", query)", "def _compute_days_tarea(self, cr, uid, ids, field, arg, context=None):\n import datetime\n result = {}\n records = self.browse(cr, uid, ids, context=context)\n for r in records:\n if r.date_start_tarea:\n d = time.strptime(r.date_start_tarea,'%Y-%m-%d %H:%M:%S')\n for r2 in records:\n if r2.date_end_tarea:\n c = time.strptime(r2.date_end_tarea,'%Y-%m-%d %H:%M:%S')\n delta = datetime.datetime(c[0], c[1], c[2]) - datetime.datetime(d[0], d[1], d[2])\n weeks, days = divmod(delta.days, 1)\n result[r2.id] = weeks\n return result", "def FE_start_end_date_time_features(smalldf, startTime, endTime, splitter_date_string=\"/\",splitter_hour_string=\":\"):\r\n smalldf = smalldf.copy()\r\n add_cols = []\r\n date_time_variable_flag = False\r\n if smalldf[startTime].dtype in ['datetime64[ns]','datetime16[ns]','datetime32[ns]']:\r\n print('%s variable is a date-time variable' %startTime)\r\n date_time_variable_flag = True\r\n if date_time_variable_flag:\r\n view_days = 'processing'+startTime+'_elapsed_days'\r\n smalldf[view_days] = (smalldf[endTime] - smalldf[startTime]).astype('timedelta64[s]')/(60*60*24)\r\n smalldf[view_days] = smalldf[view_days].astype(int)\r\n add_cols.append(view_days)\r\n view_time = 'processing'+startTime+'_elapsed_time'\r\n smalldf[view_time] = (smalldf[endTime] - smalldf[startTime]).astype('timedelta64[s]').values\r\n add_cols.append(view_time)\r\n else:\r\n start_date = 'processing'+startTime+'_start_date'\r\n smalldf[start_date] = smalldf[startTime].map(lambda x: x.split(\" \")[0])\r\n add_cols.append(start_date) \r\n try:\r\n start_time = 'processing'+startTime+'_start_time'\r\n smalldf[start_time] = smalldf[startTime].map(lambda x: x.split(\" \")[1])\r\n 
add_cols.append(start_time)\r\n except:\r\n ### there is no hour-minutes part of this date time stamp field. You can just skip it if it is not there\r\n pass\r\n end_date = 'processing'+endTime+'_end_date'\r\n smalldf[end_date] = smalldf[endTime].map(lambda x: x.split(\" \")[0])\r\n add_cols.append(end_date)\r\n try:\r\n end_time = 'processing'+endTime+'_end_time'\r\n smalldf[end_time] = smalldf[endTime].map(lambda x: x.split(\" \")[1])\r\n add_cols.append(end_time)\r\n except:\r\n ### there is no hour-minutes part of this date time stamp field. You can just skip it if it is not there\r\n pass\r\n view_days = 'processing'+startTime+'_elapsed_days'\r\n smalldf[view_days] = (pd.to_datetime(smalldf[end_date]) - pd.to_datetime(smalldf[start_date])).values.astype(int)\r\n add_cols.append(view_days)\r\n try:\r\n view_time = 'processing'+startTime+'_elapsed_time'\r\n smalldf[view_time] = (pd.to_datetime(smalldf[end_time]) - pd.to_datetime(smalldf[start_time])).astype('timedelta64[s]').values\r\n add_cols.append(view_time)\r\n except:\r\n ### In some date time fields this gives an error so skip it in that case\r\n pass\r\n #### The reason we chose endTime here is that startTime is usually taken care of by another library. So better to do this alone.\r\n year = 'processing'+endTime+'_end_year'\r\n smalldf[year] = smalldf[end_date].map(lambda x: str(x).split(splitter_date_string)[0]).values\r\n add_cols.append(year)\r\n #### The reason we chose endTime here is that startTime is usually taken care of by another library. So better to do this alone.\r\n month = 'processing'+endTime+'_end_month'\r\n smalldf[month] = smalldf[end_date].map(lambda x: str(x).split(splitter_date_string)[1]).values\r\n add_cols.append(month)\r\n try:\r\n #### The reason we chose endTime here is that startTime is usually taken care of by another library. So better to do this alone.\r\n daynum = 'processing'+endTime+'_end_day_number'\r\n smalldf[daynum] = smalldf[end_date].map(lambda x: str(x).split(splitter_date_string)[2]).values\r\n add_cols.append(daynum)\r\n except:\r\n ### In some date time fields the day number is not there. If not, just skip it ####\r\n pass\r\n #### In some date time fields, the hour and minute is not there, so skip it in that case if it errors!\r\n try:\r\n start_hour = 'processing'+startTime+'_start_hour'\r\n smalldf[start_hour] = smalldf[start_time].map(lambda x: str(x).split(splitter_hour_string)[0]).values\r\n add_cols.append(start_hour)\r\n start_min = 'processing'+startTime+'_start_hour'\r\n smalldf[start_min] = smalldf[start_time].map(lambda x: str(x).split(splitter_hour_string)[1]).values\r\n add_cols.append(start_min)\r\n except:\r\n ### If it errors, skip it\r\n pass\r\n #### Check if there is a weekday and weekends in date time columns using endTime only\r\n weekday_num = 'processing'+endTime+'_end_weekday_number'\r\n smalldf[weekday_num] = pd.to_datetime(smalldf[end_date]).dt.weekday.values\r\n add_cols.append(weekday_num)\r\n weekend = 'processing'+endTime+'_end_weekend_flag'\r\n smalldf[weekend] = smalldf[weekday_num].map(lambda x: 1 if x in[5,6] else 0)\r\n add_cols.append(weekend)\r\n #### If everything works well, there should be 13 new columns added by module. All the best!\r\n print('%d columns added using start date=%s and end date=%s processing...' 
%(len(add_cols),startTime,endTime))\r\n return smalldf", "def create_daily_hours(start_date, end_date):\n assert type(start_date) == str\n assert type(end_date) == str\n\n start_date = start_date + \" 00:00:00\"\n end_date = end_date + \" 23:00:00\"\n\n start_date = datetime.datetime.strptime(start_date, \"%Y-%m-%d %H:%M:%S\")\n end_date = datetime.datetime.strptime(end_date, \"%Y-%m-%d %H:%M:%S\")\n\n dates = []\n for date in utils.daterange(start_date, end_date):\n dates.append(date)\n\n return dates", "def input_starting_hours(sheet_name, workbook_path):\n question = [\n inquirer.List('starting_hours',\n message=f'Have you already invested time into {sheet_name}?',\n choices=['Yes', 'No'],\n default='No',\n )\n ]\n answer = inquirer.prompt(question)\n choice = answer['starting_hours']\n if choice == 'Yes':\n starting_hours = int(input(f\"How many hours have you already invested in {sheet_name}? \"))\n wb = openpyxl.load_workbook(workbook_path)\n ws = wb.active\n ws['A2'] = starting_hours\n next_level = 1\n level = 0\n while starting_hours >= (next_level ** 2):\n starting_hours -= next_level ** 2\n level += 1\n next_level += 1\n ws['B2'] = level\n ws['B4'] = next_level\n ws['A4'] = next_level ** 2\n ws['C2'] = starting_hours\n ws['C4'] = (next_level ** 2) - starting_hours\n wb.save(workbook_path)", "def _work_hour_value(self):\n if self.month_workdays == 0 or self.workday_hours == 0:\n self.work_hour_value = 0\n else:\n self.work_hour_value = round(self.wage / self.month_workdays / self.workday_hours, 2)", "def get_hours():\n\n print('This program calculates fees paid per hour.Enter hour in H:m using the 24 hour format.')\n\n today = datetime.today()\n start_time = input('Enter time started in H:m format: ')\n end_time = input('Enter time ended in H:m format: ')\n task = input('Enter task name: ')\n description = input('Give a brief description of task: ')\n\n # start_time_str = start_time\n start_time = datetime.strptime(start_time, '%H:%M').time()\n end_time = datetime.strptime(end_time, '%H:%M').time()\n\n # print(start_time_object, end_time_object)\n\n time_elapsed = datetime.combine(\n datetime.today(), end_time) - datetime.combine(date.today(), start_time)\n total_seconds = time_elapsed.seconds\n # print(total_seconds)\n hours = total_seconds/3600\n\n print('Number of hours spent on task is ', hours, 'hours.')\n\n get_price(hours)\n save_to_csv(today, task, description, hours, start_time, end_time)", "def duration(self):\n delta = self.occurrence.end - self.occurrence.start\n real_hours = delta.days * 24 + delta.seconds / (60.0 * 60.0)\n\n adjusted_hours = attendance_settings.HOUR_MULTIPLIER * real_hours\n\n return adjusted_hours", "def count_hours(ranges, range_start=None, range_stop=None):\n buckets = [0.0] * (24 * 7)\n one_hour = timedelta(0, 60 * 60)\n\n first = None\n last = None\n\n for start, stop in ranges:\n if ((range_start is not None and start < range_start) or\n (range_stop is not None and stop > range_stop)):\n continue\n\n if first is None:\n first = start\n last = stop\n\n open_ref = datetime(start.year, start.month, start.day, start.hour)\n open_ref += one_hour\n open_ref = min(open_ref, stop)\n open_frac = (open_ref - start) / one_hour\n buckets[24 * start.weekday() + start.hour] += open_frac\n\n if start.date() != stop.date() or start.hour != stop.hour:\n stop_ref = datetime(stop.year, stop.month, stop.day, stop.hour)\n stop_frac = (stop - stop_ref) / one_hour\n buckets[24 * stop.weekday() + stop.hour] += stop_frac\n\n start_hour = 24 * open_ref.weekday() + 
open_ref.hour\n stop_hour = 24 * stop.weekday() + stop.hour\n if stop_hour < start_hour - 1:\n stop_hour += 24 * 7\n\n for hour in range(start_hour, stop_hour):\n buckets[hour % (24 * 7)] += 1\n\n return buckets, first, last", "def build_convert_to_hours(time_units):\n if time_units not in VALID_TIME_UNITS:\n raise ValueError('Time units must be one of', VALID_TIME_UNITS)\n \n if time_units == 'min':\n return lambda x: x/60\n elif time_units == 'h':\n return lambda x: x", "def diveDates(self,start,finish):\n start = datetime.strptime(start,\"%Y-%m-%d\")\n finish = datetime.strptime(finish,\"%Y-%m-%d\")\n return start+(finish-start)/2", "def to_hours(self, timesteps, to_label=False):\n out = timesteps*self.dt/(60*60)\n if to_label:\n out = [ '{:.2f} hours'.format(el) for el in out ]\n return out", "def to_hours(self, timesteps, to_label=False):\n out = timesteps*self.dt/(60*60)\n if to_label:\n out = [ '{:.2f} hours'.format(el) for el in out ]\n return out", "def to_hours(self, timesteps, to_label=False):\n out = timesteps*self.dt/(60*60)\n if to_label:\n out = [ '{:.2f} hours'.format(el) for el in out ]\n return out", "def to_hours(self, timesteps, to_label=False):\n out = timesteps*self.dt/(60*60)\n if to_label:\n out = [ '{:.2f} hours'.format(el) for el in out ]\n return out", "def calculate_hours(time):\n return int(time / 3600)", "def proxy_hours_minutes(self):\n\n td = self.convert_last_col_filtered()\n resultat = td.days * 24 + td.seconds // 3600, (td.seconds // 60) % 60\n # print('{} H {} M'.format(*resultat))\n print(resultat)\n return resultat", "def adjust_date(cmd_args):\n valid_times_6 = [0, 6, 12, 18]\n valid_times_24 = [0, 12]\n\n date_in = datetime.strptime(cmd_args.date, '%Y-%m-%d-%H')\n\n if (cmd_args.period == 99):\n \"\"\"\n Seasonal accumulation\n Set the ending hour to 12z and decrement the day, if necessary\n \"\"\"\n if (date_in.hour < 12):\n date_in = date_in.replace(day = date_in.day - 1)\n date_in = date_in.replace(hour = 12)\n elif (cmd_args.period == 6):\n \"\"\"\n 6-hr accumulation\n Set the hour to the previous synoptic time if necessary\n \"\"\"\n if (date_in.hour not in valid_times_6):\n new_hr = max([i for i in valid_times_6 if date_in.hour > i])\n date_in = date_in.replace(hour = new_hr)\n else:\n if (date_in.hour not in valid_times_24):\n new_hr = max([i for i in valid_times_24 if date_in.hour > i])\n date_in = date_in.replace(hour = new_hr)\n\n return date_in", "def test_emp_man_hours(self):\n start = timezone.make_aware(dt.datetime(2016, 6, 3, 6, 30))\n stop = timezone.make_aware(dt.datetime(2016, 6, 3, 10, 30))\n emp_hours = 0\n\n expected_emp_hours = 20.95\n\n # getting employee objects that are clocked in\n clocked_in_emp = get_clocked_in(start)\n emp_that_left = get_emp_who_left_during_shift(start, stop)\n emp_that_breaked = get_emp_who_left_on_break(start, stop)\n\n # testing return of number of hours\n for employee in clocked_in_emp:\n print(\"EMP= \", employee.PRSN_NBR_TXT)\n emp_hour = get_emp_man_hours(employee, start, stop)\n print(\"EMP HOUR= \", emp_hour)\n emp_hours += emp_hour\n\n for employee in emp_that_left:\n print(\"EMP= \", employee.PRSN_NBR_TXT)\n emp_hour = get_emp_man_hours(employee, start, stop)\n print(\"EMP HOUR= \", emp_hour)\n emp_hours += emp_hour\n\n for employee in emp_that_breaked:\n print(\"EMP= \", employee.PRSN_NBR_TXT)\n emp_hour = get_emp_man_hours(employee, start, stop)\n print(\"EMP HOUR= \", emp_hour)\n emp_hours += emp_hour\n\n self.assertAlmostEqual(emp_hours, expected_emp_hours)", "def time_interval( self ):\n 
begin = self.begin; end = self.end\n if end - begin < 600*self.hour_switch:\n return 600\n if end - begin < 86400*self.day_switch:\n return 3600\n elif end - begin < 86400*7*self.week_switch:\n return 86400\n else:\n return 86400*7", "def test_date_interval(self, init_date, end_date):\n self.calc_earning(self.security[(self.security['Date'] > init_date) &\n (self.security['Date'] < end_date)])", "def get_image_cleaner_interval_hours(self) -> Union[int, None]:\n interval_hours = self._get_image_cleaner_interval_hours(enable_validation=True)\n\n return interval_hours", "def _get_hours_pro_entry(time_entries):\n events = []\n for event in time_entries:\n start_time = datetime.datetime(\n date.today().year,\n date.today().month,\n date.today().day,\n event.start_at.hour,\n event.start_at.minute,\n event.start_at.second,\n )\n end_time = datetime.datetime(\n date.today().year,\n date.today().month,\n date.today().day,\n event.finish_at.hour,\n event.finish_at.minute,\n event.finish_at.second,\n )\n\n timediff = end_time - start_time\n events.append(\n {\n \"worked_hours\": round(timediff.total_seconds() / 3600, DECIMALS_HOUR),\n \"event\": event,\n }\n )\n return events", "def calc_hours(ws, ws_mo, r, c):\n formula = ws[f'{c}{r}'].value.split('!')[-1]\n total = ws_mo[formula].value[5:-1]\n result = 0.0\n for row in ws_mo[total]:\n if not row:\n break\n v = row[0].value\n if v is not None:\n result += row[0].value\n return result", "def date_in_hours(date):\n delta = delta_from_now(date)\n return (delta.days * 24) + delta.seconds / (60 * 60)", "def py2_earth_hours_left(start_date=BITE_CREATED_DT):\n\n td = (PY2_DEATH_DT - start_date)\n return round((td.days*24 + td.seconds/3600), 1)", "def onchange_rh_date(self):\n if self._context.get('load_from_rh'):\n self.onchange_rh_job()", "def date_hour(date):\n return date.hour", "def calculate_time_results(\n time_start,\n trigger_time,\n):\n return {\n 'sample_time': (time_start + (trigger_time - time_start) / 2).unix*u.s,\n 'sample_time_min': time_start.unix*u.s,\n 'sample_time_max': trigger_time.unix*u.s,\n }", "def set_Hour(self, value):\n super(GetTimestampFromDateParametersInputSet, self)._set_input('Hour', value)", "def get_start_end_dates(row):\n\n if row['wo_id'] == 'UTLY' or row['wo_id'] == 'TSW':\n return row['job_start_dt'], row['job_end_dt']\n\n else:\n\n if row['job_completed_cbox'] == 1:\n return row['job_end_dt'], row['job_end_dt']\n\n else:\n return row['start'], row['end']", "def runtime_cal(start,end) :\n run_time = end - start\n mm = int(run_time/60)\n ss = round(run_time%60)\n return mm, ss", "def __hour(self):\n return _VirtualColumn(\n df_name=self.thisptr[\"df_name_\"],\n operator=\"hour\",\n operand1=self,\n operand2=None\n )", "def calculate_working_hours(logs, check_in_out_type, working_hours_calc_type):\n\ttotal_hours = 0\n\tin_time = out_time = None\n\tif check_in_out_type == 'Alternating entries as IN and OUT during the same shift':\n\t\tin_time = logs[0].time\n\t\tif len(logs) >= 2:\n\t\t\tout_time = logs[-1].time\n\t\tif working_hours_calc_type == 'First Check-in and Last Check-out':\n\t\t\t# assumption in this case: First log always taken as IN, Last log always taken as OUT\n\t\t\ttotal_hours = time_diff_in_hours(in_time, logs[-1].time)\n\t\telif working_hours_calc_type == 'Every Valid Check-in and Check-out':\n\t\t\tlogs = logs[:]\n\t\t\twhile len(logs) >= 2:\n\t\t\t\ttotal_hours += time_diff_in_hours(logs[0].time, logs[1].time)\n\t\t\t\tdel logs[:2]\n\n\telif check_in_out_type == 'Strictly based on Log Type in Employee 
Checkin':\n\t\tif working_hours_calc_type == 'First Check-in and Last Check-out':\n\t\t\tfirst_in_log_index = find_index_in_dict(logs, 'log_type', 'IN')\n\t\t\tfirst_in_log = logs[first_in_log_index] if first_in_log_index or first_in_log_index == 0 else None\n\t\t\tlast_out_log_index = find_index_in_dict(reversed(logs), 'log_type', 'OUT')\n\t\t\tlast_out_log = logs[len(logs)-1-last_out_log_index] if last_out_log_index or last_out_log_index == 0 else None\n\t\t\tif first_in_log and last_out_log:\n\t\t\t\tin_time, out_time = first_in_log.time, last_out_log.time\n\t\t\t\ttotal_hours = time_diff_in_hours(in_time, out_time)\n\t\telif working_hours_calc_type == 'Every Valid Check-in and Check-out':\n\t\t\tin_log = out_log = None\n\t\t\tfor log in logs:\n\t\t\t\tif in_log and out_log:\n\t\t\t\t\tif not in_time:\n\t\t\t\t\t\tin_time = in_log.time\n\t\t\t\t\tout_time = out_log.time\n\t\t\t\t\ttotal_hours += time_diff_in_hours(in_log.time, out_log.time)\n\t\t\t\t\tin_log = out_log = None\n\t\t\t\tif not in_log:\n\t\t\t\t\tin_log = log if log.log_type == 'IN' else None\n\t\t\t\telif not out_log:\n\t\t\t\t\tout_log = log if log.log_type == 'OUT' else None\n\t\t\tif in_log and out_log:\n\t\t\t\tout_time = out_log.time\n\t\t\t\ttotal_hours += time_diff_in_hours(in_log.time, out_log.time)\n\treturn total_hours, in_time, out_time", "def duration_hours(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"duration_hours\")", "def get_hourly(self):\n pass", "def _setup_volunteer_hours(\n volunteer,\n npf_admin,\n org,\n project,\n datetime_start,\n datetime_end,\n description=\"Manually tracked time \",\n event_type=\"MN\",\n is_verified=False,\n action_type='req'\n):\n event = Event.objects.create(\n project=project,\n is_public=True,\n description=\"finished event\",\n location=\"test_location\",\n coordinator=npf_admin,\n event_type=event_type,\n datetime_start=datetime_start,\n datetime_end=datetime_end\n )\n\n volunteer_timelog = UserTimeLog.objects.create(\n user=volunteer,\n event=event,\n datetime_start=datetime_start,\n datetime_end=datetime_end,\n is_verified=is_verified\n )\n\n actiontimelog = AdminActionUserTime.objects.create(\n user=npf_admin,\n usertimelog=volunteer_timelog,\n action_type=action_type\n )\n\n return volunteer_timelog, actiontimelog, event", "def DATEDIF(\n start_date: func_xltypes.XlDateTime,\n end_date: func_xltypes.XlDateTime,\n unit: func_xltypes.XlText\n) -> func_xltypes.XlNumber:\n\n if start_date > end_date:\n raise xlerrors.NumExcelError(\n f'Start date must be before the end date. 
Got Start: \\\n {start_date}, End: {end_date}')\n\n datetime_start_date = utils.number_to_datetime(int(start_date))\n datetime_end_date = utils.number_to_datetime(int(end_date))\n\n if str(unit).upper() == 'Y':\n date_list = list(rrule.rrule(rrule.YEARLY,\n dtstart=datetime_start_date,\n until=datetime_end_date))\n return len(date_list) - 1 # end of day to end of day / \"full days\"\n\n elif str(unit).upper() == 'M':\n date_list = list(rrule.rrule(rrule.MONTHLY,\n dtstart=datetime_start_date,\n until=datetime_end_date))\n return len(date_list) - 1 # end of day to end of day / \"full days\"\n\n elif str(unit).upper() == 'D':\n date_list = list(rrule.rrule(rrule.DAILY,\n dtstart=datetime_start_date,\n until=datetime_end_date))\n return len(date_list) - 1 # end of day to end of day / \"full days\"\n\n elif str(unit).upper() == 'MD':\n modified_datetime_start_date = datetime_start_date.replace(year=1900,\n month=1)\n modified_datetime_end_date = datetime_end_date.replace(year=1900,\n month=1)\n date_list = list(rrule.rrule(rrule.DAILY,\n dtstart=modified_datetime_start_date,\n until=modified_datetime_end_date))\n return len(date_list) - 1 # end of day to end of day / \"full days\"\n\n elif str(unit).upper() == 'YM':\n modified_datetime_start_date = datetime_start_date.replace(year=1900,\n day=1)\n modified_datetime_end_date = datetime_end_date.replace(year=1900,\n day=1)\n date_list = list(rrule.rrule(rrule.MONTHLY,\n dtstart=modified_datetime_start_date,\n until=modified_datetime_end_date))\n return len(date_list) - 1 # end of day to end of day / \"full days\"\n\n elif str(unit).upper() == 'YD':\n modified_datetime_start_date = datetime_start_date.replace(year=1900)\n modified_datetime_end_date = datetime_end_date.replace(year=1900)\n date_list = list(rrule.rrule(rrule.DAILY,\n dtstart=modified_datetime_start_date,\n until=modified_datetime_end_date))\n return len(date_list) - 1 # end of day to end of day / \"full days\"", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s = Schedule()\n s.hour_from = 3\n s.min_from = 0\n s.hour_to = 3\n s.min_to = 59\n s.interval = 60*60*6 \n\n r = number_expected([s,],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 0 )", "def report_start_and_end_date(self):\n start_date, end_date = self.start_date, self.end_date\n if start_date:\n db_import_time = time.strptime(str(start_date), \"%Y-%m-%d %H:%M:%S\")\n db_import_time = time.strftime(\"%Y-%m-%dT%H:%M:%S\", db_import_time)\n start_date = time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.gmtime(\n time.mktime(time.strptime(db_import_time, \"%Y-%m-%dT%H:%M:%S\"))))\n start_date = str(start_date) + 'Z'\n else:\n today = datetime.now()\n earlier = today - timedelta(days=30)\n earlier_str = earlier.strftime(\"%Y-%m-%dT%H:%M:%S\")\n start_date = earlier_str + 'Z'\n\n if end_date:\n db_import_time = time.strptime(str(end_date), \"%Y-%m-%d %H:%M:%S\")\n db_import_time = time.strftime(\"%Y-%m-%dT%H:%M:%S\", db_import_time)\n end_date = time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.gmtime(\n time.mktime(time.strptime(db_import_time, \"%Y-%m-%dT%H:%M:%S\"))))\n end_date = str(end_date) + 'Z'\n else:\n today = datetime.now()\n earlier_str = today.strftime(\"%Y-%m-%dT%H:%M:%S\")\n end_date = earlier_str + 'Z'\n\n return start_date, end_date", "def getDateRange(start, end=None, kind=None, zone=None, hour=0, eod=None, end_eod=None):\n if start is 
None:\n start = datetime.now()\n if zone is None or zone == \"\":\n zone = \"UTC\"\n if start == end:\n end = None\n if eod != None:\n hour = eod\n\n start = parseDate(start)\n orig_start = start\n if end:\n end = parseDate(end)\n if start == end:\n end = None\n\n if kind:\n start = start.replace(minute=0, second=0, microsecond=0)\n if kind == \"hour\":\n end = start + timedelta(hours=1)\n elif kind == \"day\":\n start = start.replace(hour=0)\n end = start + timedelta(hours=24)\n elif kind == \"week\":\n start = start.replace(hour=0)\n start, end = getWeek(start)\n elif kind == \"month\":\n start = start.replace(hour=0, day=1)\n end = getEndOfMonth(start)\n elif kind == \"year\":\n start = start.replace(hour=0, day=1, month=1)\n end = getEndOfMonth(start.replace(month=12))\n elif type(kind) is int or (isinstance(kind, str) and kind.isdigit()):\n end = start + timedelta(hours=24)\n start = end - timedelta(days=int(kind))\n if end is None:\n end = start + timedelta(hours=24)\n\n if zone and zone.lower() == \"utc\":\n zone = None\n if not kind and eod:\n hour = None\n # now lets convert our times to the zone\n if zone or hour:\n if zone is None:\n zone = \"UTC\"\n offset = getTimeZoneOffset(zone, start, hour=hour)\n if offset:\n start = start + timedelta(hours=offset)\n if end_eod:\n hour = end_eod\n offset = getTimeZoneOffset(zone, end, hour=hour)\n if offset:\n end = end + timedelta(hours=offset)\n\n # print(\"daterange: {} to {}\".format(start, end))\n return start, end", "def horde_start(self, observation):", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s = Schedule()\n s.hour_from = 22\n s.min_from = 0\n s.hour_to = 21\n s.min_to = 59\n s.interval = 60*60\n\n r = number_expected([s,],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 6 )", "def getHoursOffset(self):\n return _libsbml.Date_getHoursOffset(self)", "def test_calculate_value(self):\n user = CustomUser.objects.get(username = \"Test User\")\n start_date = timezone.now().replace(day=1,month=8,year=2020)\n end_date = start_date + timedelta(days=30) \n expected_result = 720\n actual_result = calculate_total_user_expense_value_in_timeperiod(user, start_date, end_date)\n self.assertEquals(expected_result, actual_result)", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s1 = Schedule()\n s1.hour_from = 0\n s1.min_from = 0\n s1.hour_to = 21\n s1.min_to = 59\n s1.interval = 60*60*3 \n\n s2 = Schedule()\n s2.hour_from = 0\n s2.min_from = 0\n s2.hour_to = 21\n s2.min_to = 59\n s2.interval = 60*60*3 \n\n r = number_expected([s1,s2],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 2 )", "def how_much_hours(username, password, workers, projects, start_date, end_date):\n tt = TTrackerSession()\n tt.login(username, password)\n return tt.how_much_hours(workers, projects, start_date, end_date)", "def test_reported_during_workhours(self):\n before_workhour = datetime.datetime(2020, 8, 21, 8, 59)\n at_nine = datetime.datetime(2020, 8, 21, 9, 0)\n at_random = datetime.datetime(2020, 8, 21, 11, 32)\n at_five = datetime.datetime(2020, 8, 21, 17, 0)\n after_workhour = datetime.datetime(2020, 8, 21, 17, 2)\n\n self.assertRaises(ValueError, calculate_due_date, before_workhour, 
1)\n calculate_due_date(at_nine, 1)\n calculate_due_date(at_random, 1)\n self.assertRaises(ValueError, calculate_due_date, at_five, 1)\n self.assertRaises(ValueError, calculate_due_date, after_workhour, 1)", "def active_hours(self):\n return self._active_hours", "def hours(self):\n return int(self.minutes / 60)", "def office_time_between(a, b, start = timedelta(hours = 8),\n stop = timedelta(hours = 17)):\n zero = timedelta(0)\n assert(zero <= start <= stop <= timedelta(1))\n office_day = stop - start\n working_days = full_in_between_working_days(a, b) \n \n print(working_days)\n\n total = office_day * working_days\n print(total)\n # Calculate the time adusted deltas for the the start and end days\n a_delta = adjust_hour_delta(a, start, stop)\n b_delta = adjust_hour_delta(b, start, stop)\n\n\n if a.date() == b.date():\n # If this was a weekend, ignore\n if a.weekday() < 5:\n total = total + b_delta - a_delta\n else:\n # We now consider if the start day was a weekend\n if a.weekday() > 4:\n a_worked = zero\n else:\n a_worked = stop - a_delta\n # And if the end day was a weekend\n if b.weekday() > 4:\n b_worked = zero\n else:\n b_worked = b_delta - start\n total = total + a_worked + b_worked\n\n return total", "def calculate_hr_ee(self):\n\n # HR - resting HR = net HR\n net_hr = np.array([i - self.rest_hr if i is not None else None for i in self.df_epoch[\"HR\"]])\n\n # Sets values below 0% HRR (below resting HR) to 0\n net_hr[net_hr <= 0] = 0\n\n # Equation from Brage et al., 2004. Active EE in kJ/kg/min\n kj_kg_min = [.011 * (hr ** 2) + 5.82 * hr if hr is not None else None for hr in net_hr]\n\n # Converts kJ to kcal: relative EE (kcal/kg/min)\n kcal_kg_min = [k / 4.184 if k is not None else None for k in kj_kg_min]\n\n # Converts relative EE to absolute EE (kcal/min)\n kcal_min = [k * self.weight / 1000 if k is not None else None for k in kcal_kg_min]\n\n # kcals per epoch instead of per minute\n kcal_epoch = [k * (15 / 60) for k in kcal_min]\n\n total_ee = sum([i for i in kcal_epoch if not np.isnan(i)])\n print(\"-Total energy expenditure estimated from HR is {} kcal.\".format(int(total_ee)))\n\n self.df_epoch[\"HR_EE\"] = kcal_min", "def _compute_date_from_x_start_date(self):\n for ts_line in self:\n if ts_line.x_start_date:\n st_datetime = fields.Datetime.from_string(\n ts_line.x_start_date)\n # autocomplete date from start date\n st_date_tz = fields.Datetime.context_timestamp(\n self, st_datetime).date()\n ts_line.date = st_date_tz", "def date_start_end(mytrip_start_date,mytrip_end_date):\n mytrip_start_date = dt.date(2015, 8, 10)\n mytrip_end_date= dt.date(2015, 8,14)\n prev_year = dt.timedelta(days=365)\n start_dt_strftime=dt.datetime.strptime('2014-08-10',\"%Y-%m-%d\")\n end_dt_strftime=dt.datetime.strptime('2014-08-14',\"%Y-%m-%d\") \n date_start_end_results=session.query(func.min(measurements.tobs), func.avg(measurements.tobs),func.max(measurements.tobs)).\\\n filter(measurements.date >= mytrip_start_date).filter(measurements.date <= end_dt_strftime).all()\n return(date_start_end_results)", "def running_custom_hour(arg):\n pass", "def calc_temps(start_date, end_date):\r\n\r\n print(\"two dates\\n\")\r\n return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\r\n filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()", "def setHoursOffset(self, *args):\n return _libsbml.Date_setHoursOffset(self, *args)", "def _compute_duration_overtime(self):\n diff_float = 0\n for ts_line in self:\n if 
ts_line.x_start_date:\n st_datetime = fields.Datetime.from_string(\n ts_line.x_start_date)\n en_datetime = fields.Datetime.from_string(\n ts_line.x_end_date)\n diff = en_datetime - st_datetime\n if not ts_line.is_overtime:\n if(time(1, 00) <= st_datetime.time() <= time(5, 00)):\n if(time(6, 00) <= en_datetime.time() <= time(10, 00)):\n # del 1 hour for breaking lunch\n diff_float = round(\n diff.total_seconds() / 3600.0, 2)-1\n else:\n diff_float = round(diff.total_seconds() / 3600.0, 2)\n ts_line.x_is_per_diem = False\n ts_line.unit_amount = diff_float", "def working_hours_by_day(self, day):\r\n availabilities = self.sorted_availabilities(day)\r\n options = []\r\n if not availabilities:\r\n return 0\r\n for availability in availabilities:\r\n count = 0\r\n while True:\r\n new = time(availability.start.hour + count, 0)\r\n options.append((new.hour, new.__str__()))\r\n count += 1\r\n if new >= availability.end:\r\n break\r\n return options", "def get_working_hour(self):\n working_hrs_id = self.search([('active', '=', True)])\n if working_hrs_id:\n return {\n 'biz_open_time': time(int(working_hrs_id.start_hour), int(working_hrs_id.start_minute), 0),\n 'biz_close_time': time(int(working_hrs_id.end_hour), int(working_hrs_id.end_minute), 0),\n 'holiday_list': {line.occ_date: line.name for line in working_hrs_id.non_working_days_line},\n 'deadline_revise_percentage': working_hrs_id.deadline_revise_percentage,\n }\n raise ValidationError(\"Working Hour configuration is missing!\")", "def sum_hours(self):\n return sum([le.num_hrs for le in self.log_entries])", "def i_see_the_set_dates(_step):\r\n verify_date_or_time(COURSE_START_DATE_CSS, '12/20/2013')\r\n verify_date_or_time(COURSE_END_DATE_CSS, '12/26/2013')\r\n verify_date_or_time(ENROLLMENT_START_DATE_CSS, '12/01/2013')\r\n verify_date_or_time(ENROLLMENT_END_DATE_CSS, '12/10/2013')\r\n\r\n verify_date_or_time(COURSE_START_TIME_CSS, DUMMY_TIME)\r\n # Unset times get set to 12 AM once the corresponding date has been set.\r\n verify_date_or_time(COURSE_END_TIME_CSS, DEFAULT_TIME)\r\n verify_date_or_time(ENROLLMENT_START_TIME_CSS, DEFAULT_TIME)\r\n verify_date_or_time(ENROLLMENT_END_TIME_CSS, DUMMY_TIME)", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s = Schedule()\n s.hour_from = 0\n s.min_from = 0\n s.hour_to = 21\n s.min_to = 59\n s.interval = 60*60*3 \n\n r = number_expected([s,],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 2 )", "def date_range(self):\n start_date = input(\"Enter a start date in the format DD/MM/YYYY> \")\n end_date = input(\"Enter an end date in the format DD/MM/YYYY> \")\n return start_date, end_date", "def get_hourly_exits(df):\n df['EXITSn_hourly'] = (df['EXITSn'] - df['EXITSn'].shift(1)).fillna(0)\n return df", "def calc_diff(starting, ending, type='days'):\n # Note the cheeky minus at the beginning\n if type == 'days':\n try:\n return (datetime.strptime(ending, format) - datetime.strptime(starting, format)).days\n except ValueError:\n return np.nan\n if type == 'weeks':\n return round((datetime.strptime(ending, format) - datetime.strptime(starting, format)).days / 7)\n if type == 'months':\n return (datetime.strptime(ending, format).year - datetime.strptime(starting, format).year) * 12 \\\n + (datetime.strptime(ending, format).month - datetime.strptime(starting, format).month)", "def start1(self): \n return 
self.ddmmyyyy(self.rowTime.start)", "def preprocess_hours_extend_workdays(business):\n\tworkdays = list()\n\tfor (day, hour) in business[HOURS].items():\n\t\tworkdays.append(day)\n\t\tstart_end = hour.split(\"-\")\n\t\tbusiness[WORKDAYS_START(day)] = start_end[0]\n\t\tbusiness[WORDDAYS_END(day)] = start_end[1]\n\n\tbusiness[WORKDAYS] = workdays", "def test_set_begin_and_end_for_emp(self):\n start = timezone.make_aware(dt.datetime(2016, 6, 3, 6, 30))\n stop = timezone.make_aware(dt.datetime(2016, 6, 3, 10, 30))\n expected_begin = timezone.make_aware(dt.datetime(2016, 6, 3, 6, 30))\n expected_end = timezone.make_aware(dt.datetime(2016, 6, 2, 14, 32))\n\n example_employee = RawClockData.objects.first()\n begin, end = set_begin_and_end_for_emp(\n employee=example_employee,\n start=start,\n stop=stop,\n )\n\n self.assertEqual(expected_begin, begin)\n self.assertEqual(expected_end, end)", "def submit_hours(self, report):\n raise NotImplementedError", "def hourly(x):\n ann_salary = x * 40.0 * 52.0 #assumes 40 hours a week and 52 weeks in a year\n print(f\"The anual salary is ${ann_salary:,.2f}\")", "def onchange_start_date(self, start_date=False):\n if not start_date:\n return {}\n result = {'value': {'last_renovation_date': start_date}}\n return result", "def extract_timeperiod(request):\n data = request.data.keys()\n\n if 'start_date' in data and 'end_date' in data:\n start_date = datetime.strptime(request.data['start_date'], date_format)\n end_date = datetime.strptime(request.data['end_date'], date_format)\n elif 'start_date' in data:\n start_date = datetime.strptime(request.data['start_date'], date_format)\n end_date = datetime.now()\n else:\n default_timeperiod = 7\n end_date = datetime.now()\n start_date = end_date - timedelta(days=default_timeperiod)\n\n return (start_date, end_date)", "def test_work_time_line_changes_by_date_picker(self):\n\n month_from = '11'\n year_from = '2014'\n day_from = '01'\n\n month_to = '11'\n year_to = '2014'\n day_to = '31'\n\n days_count = 31\n\n self.create_page.ad_form\\\n .set_work_time_by_date_picker(self.create_page.ad_form.WORK_TIME_DATE_FROM,\n month_from,\n year_from,\n day_from)\n\n self.create_page.ad_form\\\n .set_work_time_by_date_picker(self.create_page.ad_form.WORK_TIME_DATE_TO,\n month_to,\n year_to,\n day_to)\n\n text = self.create_page.ad_form.get_work_time_line_text()\n actual_days_count = int(text.split()[0])\n\n self.assertEquals(days_count, actual_days_count)", "def test_holidays_validate(self):\n self.request.sudo(self.user_1.id).holidays_validate()\n\n accrual = self.employee.get_leave_accrual(self.leave_type.id)\n self.assertEqual(accrual.total_hours, 22.5)", "def absulute2relative_time(x): \n if x.viewed:\n x.viewed_reltime=x.viewed_time-x.start\n \n if x.completed:\n x.completed_reltime=x.completed_time-x.start\n \n return x", "def calc_temps(start_date, end_date):\r\n startend_results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()\r\n\r\n #convert list of tuples into normal list\r\n startend_calculations = list(np.ravel(startend_results))\r\n\r\n return jsonify(startend_calculations)", "def finish_hour(self):\n\t\tassert len(self.values) >= 4, 'A fully formed update date is needed.'\n\t\tself.values = self.values[:4]", "def compute_kwh_price(supplier_with_transaction):\n\n supplier_item = supplier_with_transaction.get('supplier_detail')\n total_kwh_price = 0\n if 
supplier_item.get('has_time_based_kwh') and supplier_item.get('time_price'):\n # start to compute as complex\n for rec in supplier_item.get('time_price'):\n if rec.get('hour_from') and rec.get('hour_to'):\n if rec.get('hour_from') > rec.get('hour_to'):\n duration = (rec.get('hour_to') - rec.get('hour_from')) * 60\n else:\n duration = (rec.get('hour_to') - (24 - rec.get('hour_from'))) * 60\n else:\n duration = 0\n total_kwh_price += duration * rec.get('kwh_price', 0)\n else:\n # start to calculate the simple version for kwh price\n total_kwh_price = 24 * supplier_item.get('kwh_price', 0)\n return total_kwh_price", "def start_end_date(start_date=dt.date.today().strftime('%Y-%m-%d'),end_date = dt.date.today().strftime('%Y-%m-%d')):\n\n sel =[func.min(Measurement.tobs),\n func.avg(Measurement.tobs),\n func.max(Measurement.tobs)\n ]\n \n result = session.query(*sel).filter((Measurement.date >= start_date) & (Measurement.date <= end_date)).all()\n \n temperature_values = {}\n calculated_values = []\n \n temperature_values[\"min_temp\"] = result[0][0]\n temperature_values[\"avg_temp\"] = result[0][1]\n temperature_values[\"max_temp\"] = result[0][2]\n calculated_values.append(temperature_values)\n \n return jsonify(calculated_values)", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s1 = Schedule()\n s1.hour_from = 0\n s1.min_from = 30\n s1.hour_to = 23\n s1.min_to = 30\n s1.interval = 60*30\n\n s2 = Schedule()\n s2.hour_from = 0\n s2.min_from = 30\n s2.hour_to = 23\n s2.min_to = 30\n s2.interval = 60*60\n\n s3 = Schedule()\n s3.hour_from = 22\n s3.min_from = 0\n s3.hour_to = 23\n s3.min_to = 30\n s3.interval = 60*5\n\n\n r = number_expected([s1,s2,s3],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 25 )", "def date_in_range(start, end, x):\n\n if start <= end:\n return start <= x <= end\n else:\n return start <= x or x <= end", "def test_check_args_working_hours(self):\n test_date = dt.datetime(2021, 6, 18, 7, 0, 0)\n with self.assertRaises(ValueError) as context:\n self.duedate.check_args(test_date, self.test_turn_time)\n self.assertTrue(\n \"You can submit requests from 9AM till 5PM.\" in str(context.exception))", "def handle_interval(time_range: datetime, is_hours_interval: bool = True, is_days_interval: bool = False):\n\n current_time = datetime.utcnow()\n intervals = []\n if current_time - time_range > timedelta(\n days=7): # The maximum time range of Proofpoint TAP API requests is 7 days minus one minute.\n time_range += timedelta(minutes=1)\n\n if is_days_interval:\n while current_time - time_range > timedelta(days=1):\n start = time_range.strftime(DATE_FORMAT)\n time_range += timedelta(days=1)\n intervals.append(f'{start}/{time_range.strftime(DATE_FORMAT)}')\n\n if is_hours_interval:\n while current_time - time_range > timedelta(hours=1):\n start = time_range.strftime(DATE_FORMAT)\n time_range += timedelta(hours=1)\n intervals.append(f'{start}/{time_range.strftime(DATE_FORMAT)}')\n\n return intervals", "def check_wrong_time(self, cr, uid, att, context=None):\n # check have overtime yet?\n att_name = datetime.strptime(att.name, DEFAULT_SERVER_DATETIME_FORMAT)\n param_obj = self.pool.get('ir.config_parameter') \n max_early = param_obj.get_param(cr, uid, 'maximum_early_minutes', default=60)\n max_late = param_obj.get_param(cr, uid, 'maximum_late_minutes', default=60)\n try:\n max_early = int 
(max_early)\n max_late = int (max_late)\n except:\n raise except_osv(_(\"Warning !\"),_(\"maximum_early_minutes or maximum_late_minutes in config parameter is incorrect\"))\n \n time_early = att_name + timedelta(minutes = max_early)\n time_late = att_name - timedelta(minutes = max_late)\n \n overtime_obj = self.pool.get('hr.overtime')\n overtime_confirmed_ids = overtime_obj.search(cr, uid, [('employee_id', '=', att.employee_id.id),\n ('mode', '=', 'by_employee'),\n ('name', '=', att.day_tz),\n ('datetime_start', '<=', time_early.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('datetime_stop', '>=', time_late.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('state', 'in', ['confirmed'])\n ])\n if overtime_confirmed_ids:\n return False\n working_hour_obj = self.pool.get('hr.payroll.working.hour')\n \n \n \n \n working_hour_ids = working_hour_obj.search(cr, uid, [('employee_id', '=', att.employee_id.id),\n ('expected_start', '<=', time_early.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('expected_end', '>=', time_late.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ], context=context)\n if not working_hour_ids:\n return True\n return False", "def calculate_statistics(self) -> Dict[str, Tuple[str, float]]:\n tempDict = {\n 'max_start': ('', -1),\n 'max_end': ('', -1),\n 'max_time_low_availability': ('', -1),\n 'max_time_low_unoccupied': ('', -1)\n }\n\n\n\n return {\n 'max_start': ('', -1),\n 'max_end': ('', -1),\n 'max_time_low_availability': ('', -1),\n 'max_time_low_unoccupied': ('', -1)\n }" ]
[ "0.598755", "0.5943536", "0.58988446", "0.5735438", "0.57317203", "0.5616081", "0.55189914", "0.54957277", "0.54912084", "0.538175", "0.5366651", "0.53358847", "0.53107077", "0.53098565", "0.5307729", "0.52965957", "0.5288884", "0.52459323", "0.5241022", "0.52161556", "0.5204041", "0.51986283", "0.5193926", "0.51912034", "0.51795906", "0.51781684", "0.51781684", "0.51781684", "0.51781684", "0.5170696", "0.5160131", "0.5152943", "0.51404107", "0.5116396", "0.508868", "0.50698525", "0.5054522", "0.5034036", "0.5032074", "0.50298154", "0.5027469", "0.5019844", "0.5011365", "0.50113386", "0.49983877", "0.4997351", "0.4989442", "0.49822718", "0.49737504", "0.49654567", "0.49644184", "0.49577433", "0.49415925", "0.49364874", "0.4902188", "0.48904607", "0.48892277", "0.4887462", "0.4874605", "0.48717564", "0.4870749", "0.48690626", "0.48688078", "0.48637298", "0.48606446", "0.4856475", "0.4854519", "0.4849039", "0.48418784", "0.4841435", "0.48401064", "0.48375407", "0.483345", "0.48322365", "0.48317552", "0.48175913", "0.48110124", "0.47979936", "0.4797078", "0.47965276", "0.47959864", "0.4795731", "0.47908717", "0.47900167", "0.47888225", "0.47875857", "0.47753242", "0.47732016", "0.47719437", "0.47693467", "0.4767704", "0.47661585", "0.47604588", "0.4758894", "0.47450402", "0.47367123", "0.4736271", "0.47362515", "0.47334692", "0.4728222" ]
0.5383492
9
auto calculate 'hours' onchange of 'is_overtime'
def _compute_duration_overtime(self): diff_float = 0 for ts_line in self: if ts_line.x_start_date: st_datetime = fields.Datetime.from_string( ts_line.x_start_date) en_datetime = fields.Datetime.from_string( ts_line.x_end_date) diff = en_datetime - st_datetime if not ts_line.is_overtime: if(time(1, 00) <= st_datetime.time() <= time(5, 00)): if(time(6, 00) <= en_datetime.time() <= time(10, 00)): # del 1 hour for breaking lunch diff_float = round( diff.total_seconds() / 3600.0, 2)-1 else: diff_float = round(diff.total_seconds() / 3600.0, 2) ts_line.x_is_per_diem = False ts_line.unit_amount = diff_float
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hours(input=None):\n return get(input).hours", "def calculate_hours(time):\n return int(time / 3600)", "def interval_hours(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"interval_hours\")", "def _get_number_of_hours(self):\n if self.date_to:\n DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n from_dt = datetime.strptime(self.date_from, DATETIME_FORMAT)\n to_dt = datetime.strptime(self.date_to, DATETIME_FORMAT)\n timedelta = to_dt - from_dt\n diff_day =(float(timedelta.seconds) / 3600) - self.break_hour\n self.number_of_hours_temp = diff_day", "def hours(self):\n return self.config['hours']", "def check_wrong_time(self, cr, uid, att, context=None):\n # check have overtime yet?\n att_name = datetime.strptime(att.name, DEFAULT_SERVER_DATETIME_FORMAT)\n param_obj = self.pool.get('ir.config_parameter') \n max_early = param_obj.get_param(cr, uid, 'maximum_early_minutes', default=60)\n max_late = param_obj.get_param(cr, uid, 'maximum_late_minutes', default=60)\n try:\n max_early = int (max_early)\n max_late = int (max_late)\n except:\n raise except_osv(_(\"Warning !\"),_(\"maximum_early_minutes or maximum_late_minutes in config parameter is incorrect\"))\n \n time_early = att_name + timedelta(minutes = max_early)\n time_late = att_name - timedelta(minutes = max_late)\n \n overtime_obj = self.pool.get('hr.overtime')\n overtime_confirmed_ids = overtime_obj.search(cr, uid, [('employee_id', '=', att.employee_id.id),\n ('mode', '=', 'by_employee'),\n ('name', '=', att.day_tz),\n ('datetime_start', '<=', time_early.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('datetime_stop', '>=', time_late.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('state', 'in', ['confirmed'])\n ])\n if overtime_confirmed_ids:\n return False\n working_hour_obj = self.pool.get('hr.payroll.working.hour')\n \n \n \n \n working_hour_ids = working_hour_obj.search(cr, uid, [('employee_id', '=', att.employee_id.id),\n ('expected_start', '<=', time_early.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ('expected_end', '>=', time_late.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),\n ], context=context)\n if not working_hour_ids:\n return True\n return False", "def get_hourly(self):\n pass", "def hours(self):\n return int(self.minutes / 60)", "def get_hours_by_weekday(self, cr, uid, tpl_id, day_no, context=None):\n\n delta = timedelta(seconds=0)\n tpl = self.browse(cr, uid, tpl_id, context=context)\n for worktime in tpl.worktime_ids:\n if int(worktime.dayofweek) != day_no:\n continue\n\n fromHour, fromSep, fromMin = worktime.hour_from.partition(':')\n toHour, toSep, toMin = worktime.hour_to.partition(':')\n if len(fromSep) == 0 or len(toSep) == 0:\n raise orm.except_orm(\n 'Invalid Data', 'Format of working hours is incorrect')\n\n delta += (\n datetime.strptime(toHour + ':' + toMin, '%H:%M') -\n datetime.strptime(fromHour + ':' + fromMin, '%H:%M')\n )\n\n return float(delta.seconds / 60) / 60.0", "def tradeHours(self, context):\n raise NotImplementedError", "def getHour(self, parent):\r\n self.now = datetime.now()\r\n self.current_time = self.now.strftime(\"%H:%M:%S\")\r\n self.lineEditWidgets[\"HORA\"].setText(self.current_time)", "def _work_hour_value(self):\n if self.month_workdays == 0 or self.workday_hours == 0:\n self.work_hour_value = 0\n else:\n self.work_hour_value = round(self.wage / self.month_workdays / self.workday_hours, 2)", "def to_hours(self, timesteps, to_label=False):\n out = timesteps*self.dt/(60*60)\n if to_label:\n out = [ '{:.2f} hours'.format(el) for el in out ]\n return out", "def to_hours(self, 
timesteps, to_label=False):\n out = timesteps*self.dt/(60*60)\n if to_label:\n out = [ '{:.2f} hours'.format(el) for el in out ]\n return out", "def to_hours(self, timesteps, to_label=False):\n out = timesteps*self.dt/(60*60)\n if to_label:\n out = [ '{:.2f} hours'.format(el) for el in out ]\n return out", "def to_hours(self, timesteps, to_label=False):\n out = timesteps*self.dt/(60*60)\n if to_label:\n out = [ '{:.2f} hours'.format(el) for el in out ]\n return out", "def failed_per_hour(self):\r\n return (3600.*(self.circ_failed+self.strm_failed))/self.current_uptime()", "def hour(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"hour\")", "def active_hours(self):\n return self._active_hours", "def is_hourly(self):\n if self.wage_type == \"hourly\":\n return True\n return False", "def suspected_per_hour(self):\r\n return (3600.*(self.circ_suspected+self.strm_suspected\r\n +self.circ_failed+self.strm_failed))/self.current_uptime()", "def duration_hours(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"duration_hours\")", "def build_convert_to_hours(time_units):\n if time_units not in VALID_TIME_UNITS:\n raise ValueError('Time units must be one of', VALID_TIME_UNITS)\n \n if time_units == 'min':\n return lambda x: x/60\n elif time_units == 'h':\n return lambda x: x", "def hour(self) -> int:\n return pulumi.get(self, \"hour\")", "def remaintime_hour(self):\n return self._get_time_info([\"Remain_Time_H\", \"remainTimeHour\"])", "def hours_in(sec):\r\n return int(sec//3600)", "def calculate_seconds_in_hours(hours):\n return int(hours * 3600)", "def time_interval( self ):\n begin = self.begin; end = self.end\n if end - begin < 600*self.hour_switch:\n return 600\n if end - begin < 86400*self.day_switch:\n return 3600\n elif end - begin < 86400*7*self.week_switch:\n return 86400\n else:\n return 86400*7", "def hour(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"hour\")", "def hour(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"hour\")", "def hour(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"hour\")", "def running_custom_hour(arg):\n pass", "def is_home_hour(self, time_of_day):\n return time_of_day >= self.constants.HOME_HOUR_START / \\\n self.constants.DURATION_MAX", "def get_hours(self, date = \"\"):\n\n if date == \"\":\n DATE = datetime.today()\n else:\n year, month, day = date.split('-')\n DATE = datetime(int(year), int(month), int(day))\n\n s = requests.get(\"https://api.wdpro.disney.go.com/facility-service/schedules/{}?date={}-{}-{}\".format(self.__id, DATE.year, self.__formatDate(str(DATE.month)), self.__formatDate(str(DATE.day))), headers=getHeaders())\n data = json.loads(s.content)\n\n operating_hours_start = None\n operating_hours_end = None\n extra_hours_start = None\n extra_hours_end = None\n\n try:\n for i in range(len(data['schedules'])):\n if data['schedules'][i]['type'] == 'Operating':\n operating_hours_start = datetime(DATE.year, DATE.month, DATE.day, int(data['schedules'][i]['startTime'][0:2]), int(data['schedules'][i]['startTime'][3:5]))\n if int(data['schedules'][i]['endTime'][0:2]) >= 0 and int(data['schedules'][i]['endTime'][0:2]) <= 7:\n DATETEMP = DATE + timedelta(days=1)\n operating_hours_end = datetime(DATETEMP.year, DATETEMP.month, DATETEMP.day, int(data['schedules'][i]['endTime'][0:2]), int(data['schedules'][i]['endTime'][3:5]))\n else:\n operating_hours_end = datetime(DATE.year, DATE.month, DATE.day, int(data['schedules'][i]['endTime'][0:2]), 
int(data['schedules'][i]['endTime'][3:5]))\n\n if data['schedules'][i]['type'] == \"Special Ticketed Event\":\n extra_hours_start = datetime(DATE.year, DATE.month, DATE.day, int(data['schedules'][i]['startTime'][0:2]), int(data['schedules'][i]['startTime'][3:5]))\n if int(data['schedules'][i]['endTime'][0:2]) >= 0 and int(data['schedules'][i]['endTime'][0:2]) <= 7:\n DATETEMP = DATE + timedelta(days=1)\n extra_hours_end = datetime(DATETEMP.year, DATETEMP.month, DATETEMP.day, int(data['schedules'][i]['endTime'][0:2]), int(data['schedules'][i]['endTime'][3:5]))\n else:\n operating_hours_end = datetime(DATE.year, DATE.month, DATE.day, int(data['schedules'][i]['endTime'][0:2]), int(data['schedules'][i]['endTime'][3:5]))\n\n except KeyError:\n pass\n return operating_hours_start, operating_hours_end, extra_hours_start, extra_hours_end", "def hours_on(series, on_power_threshold=DEFAULT_ON_POWER_THRESHOLD):\n\n i_above_threshold = np.where(series[:-1] >= on_power_threshold)[0]\n # now calculate timedelta ('td') above threshold...\n td_above_thresh = (series.index[i_above_threshold + 1].values -\n series.index[i_above_threshold].values)\n secs_on = timedelta64_to_secs(td_above_thresh.sum())\n return secs_on / SEC_PER_HOUR", "def get_hours():\n\n print('This program calculates fees paid per hour.Enter hour in H:m using the 24 hour format.')\n\n today = datetime.today()\n start_time = input('Enter time started in H:m format: ')\n end_time = input('Enter time ended in H:m format: ')\n task = input('Enter task name: ')\n description = input('Give a brief description of task: ')\n\n # start_time_str = start_time\n start_time = datetime.strptime(start_time, '%H:%M').time()\n end_time = datetime.strptime(end_time, '%H:%M').time()\n\n # print(start_time_object, end_time_object)\n\n time_elapsed = datetime.combine(\n datetime.today(), end_time) - datetime.combine(date.today(), start_time)\n total_seconds = time_elapsed.seconds\n # print(total_seconds)\n hours = total_seconds/3600\n\n print('Number of hours spent on task is ', hours, 'hours.')\n\n get_price(hours)\n save_to_csv(today, task, description, hours, start_time, end_time)", "def check_hour_range(self, hour):\n if 0 <= hour <= 5:\n return 'Early Morning'\n if 6 <= hour <= 11:\n return 'Day Time'\n if 12 <= hour <= 17:\n return 'Afternoon'\n if 18 <= hour <= 23:\n return 'Evening'", "def working_hours_by_day(self, day):\r\n availabilities = self.sorted_availabilities(day)\r\n options = []\r\n if not availabilities:\r\n return 0\r\n for availability in availabilities:\r\n count = 0\r\n while True:\r\n new = time(availability.start.hour + count, 0)\r\n options.append((new.hour, new.__str__()))\r\n count += 1\r\n if new >= availability.end:\r\n break\r\n return options", "def check_hours():\n while True:\n business_object = query_business_name()\n if business_object == \"back\":\n return\n elif business_object is None:\n continue\n\n print(f\"{business_object['name']} hours are: \"\n f\"{business_object['hours']}\")", "def proxy_hours_minutes(self):\n\n td = self.convert_last_col_filtered()\n resultat = td.days * 24 + td.seconds // 3600, (td.seconds // 60) % 60\n # print('{} H {} M'.format(*resultat))\n print(resultat)\n return resultat", "def date_hour(date):\n return date.hour", "def _check_hours(self):\n for record in self:\n if record.start_hour or record.end_hour:\n if int(record.start_hour) == int(record.end_hour):\n raise ValidationError(_('Please enter different Start Hours and End Hours!'))\n if int(record.start_hour) > int(record.end_hour):\n 
raise ValidationError(_('Start hours can not be greater than end hours for the day.'))", "def GAME_TIME_ADVANCE(dt):", "def reservetime_hour(self):\n return self._get_time_info([\"Reserve_Time_H\", \"reserveTimeHour\"])", "def get_hourly_exits(df):\n df['EXITSn_hourly'] = (df['EXITSn'] - df['EXITSn'].shift(1)).fillna(0)\n return df", "def how_much_hours(username, password, workers, projects, start_date, end_date):\n tt = TTrackerSession()\n tt.login(username, password)\n return tt.how_much_hours(workers, projects, start_date, end_date)", "def _check_hour_data(self, ls_row):\n for attr in ['kt_re', 'kt_im', 'div', 'type', 'area_code']:\n self._check_datum(attr, getattr(ls_row, attr))", "def hours_studied(self):\n value = input(\"Enter value (or 'exit')>>>\")\n while not self.is_float(value):\n value = input(\"Enter value (or 'exit')>>>\")\n\n # Escape command\n if value == 'exit':\n return value\n\n return float(value)", "def test_02_stats_hours(self):\r\n hour = unicode(datetime.datetime.utcnow().strftime('%H'))\r\n with self.flask_app.test_request_context('/'):\r\n hours, hours_anon, hours_auth, max_hours,\\\r\n max_hours_anon, max_hours_auth = stats.stats_hours(1)\r\n print hours\r\n for i in range(0, 24):\r\n # There should be only 10 answers at current hour\r\n if str(i).zfill(2) == hour:\r\n err_msg = \"At time %s there should be 10 answers\" \\\r\n \"but there are %s\" % (str(i).zfill(2),\r\n hours[str(i).zfill(2)])\r\n assert hours[str(i).zfill(2)] == 10, \"There should be 10 answers\"\r\n else:\r\n err_msg = \"At time %s there should be 0 answers\" \\\r\n \"but there are %s\" % (str(i).zfill(2),\r\n hours[str(i).zfill(2)])\r\n assert hours[str(i).zfill(2)] == 0, err_msg\r\n\r\n if str(i).zfill(2) == hour:\r\n tmp = (hours_anon[hour] + hours_auth[hour])\r\n assert tmp == 10, \"There should be 10 answers\"\r\n else:\r\n tmp = (hours_anon[str(i).zfill(2)] + hours_auth[str(i).zfill(2)])\r\n assert tmp == 0, \"There should be 0 answers\"\r\n err_msg = \"It should be 10, as all answers are submitted in the same hour\"\r\n tr = db.session.query(TaskRun).all()\r\n for t in tr:\r\n print t.finish_time\r\n assert max_hours == 10, err_msg\r\n assert (max_hours_anon + max_hours_auth) == 10, err_msg", "def set_Hour(self, value):\n super(GetTimestampFromDateParametersInputSet, self)._set_input('Hour', value)", "def overtime(self):\n if self._overtime != '':\n return True\n return False", "def _get_hours_pro_entry(time_entries):\n events = []\n for event in time_entries:\n start_time = datetime.datetime(\n date.today().year,\n date.today().month,\n date.today().day,\n event.start_at.hour,\n event.start_at.minute,\n event.start_at.second,\n )\n end_time = datetime.datetime(\n date.today().year,\n date.today().month,\n date.today().day,\n event.finish_at.hour,\n event.finish_at.minute,\n event.finish_at.second,\n )\n\n timediff = end_time - start_time\n events.append(\n {\n \"worked_hours\": round(timediff.total_seconds() / 3600, DECIMALS_HOUR),\n \"event\": event,\n }\n )\n return events", "def make_hourly(self,rate,name):\n id = self.find_employee_id(name)\n if id in self.clsf:\n self.emp_dict[id][5] = \"1\"\n print(\"{}{}\".format(name,\" was successfully changed to be an hourly employee\"))\n self.emp_dict[id][8] = rate\n self.classification()\n return self.emp_dict\n else:\n print(\"Error- employee not found\")\n self.employee_data()", "def display_hours(employee_id):\n\n if not g.user:\n flash(\"Please Login to continue.\", \"danger\")\n return redirect(\"/\")\n \n employee = 
Employee.query.get_or_404(employee_id)\n\n labels = json.dumps( [\"Completed\", \"Required\"])\n data = json.dumps([employee.completed, employee.required])\n \n return render_template(\"users/display_hours.html\", employee = employee, labels = labels, data = data)", "def test_emp_man_hours(self):\n start = timezone.make_aware(dt.datetime(2016, 6, 3, 6, 30))\n stop = timezone.make_aware(dt.datetime(2016, 6, 3, 10, 30))\n emp_hours = 0\n\n expected_emp_hours = 20.95\n\n # getting employee objects that are clocked in\n clocked_in_emp = get_clocked_in(start)\n emp_that_left = get_emp_who_left_during_shift(start, stop)\n emp_that_breaked = get_emp_who_left_on_break(start, stop)\n\n # testing return of number of hours\n for employee in clocked_in_emp:\n print(\"EMP= \", employee.PRSN_NBR_TXT)\n emp_hour = get_emp_man_hours(employee, start, stop)\n print(\"EMP HOUR= \", emp_hour)\n emp_hours += emp_hour\n\n for employee in emp_that_left:\n print(\"EMP= \", employee.PRSN_NBR_TXT)\n emp_hour = get_emp_man_hours(employee, start, stop)\n print(\"EMP HOUR= \", emp_hour)\n emp_hours += emp_hour\n\n for employee in emp_that_breaked:\n print(\"EMP= \", employee.PRSN_NBR_TXT)\n emp_hour = get_emp_man_hours(employee, start, stop)\n print(\"EMP HOUR= \", emp_hour)\n emp_hours += emp_hour\n\n self.assertAlmostEqual(emp_hours, expected_emp_hours)", "def check_overtime(self, cr, uid, att, context=None):\n if att:\n overtime_obj = self.pool.get('hr.overtime')\n orertime_ids = overtime_obj.search(cr, uid, [('employee_id', '=', att.employee_id.id),\n ('mode', '=', 'by_employee'),\n ('name', '=', att.day_tz),\n ('datetime_start', '<=', att.name),\n ('datetime_stop', '>=', att.name),\n ('state', 'not in', ['cancel', 'confirmed', 'done'])\n ])\n if orertime_ids:\n return True\n return False", "def initialtime_hour(self):\n return self._get_time_info([\"Initial_Time_H\", \"initialTimeHour\"])", "def convert_to_24_hours(time, ap):\r\n if ap.lower() == 'p' and time <= 12:\r\n time += 12\r\n\r\n return time", "def market_hours():\n current_time = datetime.datetime.now().time()\n # Check if the current time is in the time bracket in which NSE operates.\n # The market opens at 9:15 am\n start_time = datetime.datetime.now().time().replace(hour=9, minute=15, second=0, microsecond=0)\n # And ends at 3:30 = 15:30\n end_time = datetime.datetime.now().time().replace(hour=15, minute=30, second=0, microsecond=0)\n\n if current_time > start_time and current_time < end_time:\n return True\n\n # In case the above condition does not satisfy, the default value (False) is returned\n return False", "def is_market_hours():\n now = datetime.datetime.now()\n day = now.weekday()\n time = now.hour * 100 + now.minute\n\n if day > 4:\n return False\n\n if 930 <= time < 1600:\n return True\n\n return False", "def calc_hours(ws, ws_mo, r, c):\n formula = ws[f'{c}{r}'].value.split('!')[-1]\n total = ws_mo[formula].value[5:-1]\n result = 0.0\n for row in ws_mo[total]:\n if not row:\n break\n v = row[0].value\n if v is not None:\n result += row[0].value\n return result", "def hourly(self):\n return c.Hourly(self)", "def get_image_cleaner_interval_hours(self) -> Union[int, None]:\n interval_hours = self._get_image_cleaner_interval_hours(enable_validation=True)\n\n return interval_hours", "def __hour(self):\n return _VirtualColumn(\n df_name=self.thisptr[\"df_name_\"],\n operator=\"hour\",\n operand1=self,\n operand2=None\n )", "def get_working_hour(self):\n working_hrs_id = self.search([('active', '=', True)])\n if working_hrs_id:\n return {\n 
'biz_open_time': time(int(working_hrs_id.start_hour), int(working_hrs_id.start_minute), 0),\n 'biz_close_time': time(int(working_hrs_id.end_hour), int(working_hrs_id.end_minute), 0),\n 'holiday_list': {line.occ_date: line.name for line in working_hrs_id.non_working_days_line},\n 'deadline_revise_percentage': working_hrs_id.deadline_revise_percentage,\n }\n raise ValidationError(\"Working Hour configuration is missing!\")", "def open_hours_detail(self):\n return self._open_hours_detail", "def _calculate_hours_percent(used_hours, estimated_hours):\n percent = (used_hours * 100) / estimated_hours\n return percent", "def validate_hour(self):\n\t\tlogin_before = int(webnotes.conn.get_value('Profile', self.user, 'login_before', ignore=True) or 0)\n\t\tlogin_after = int(webnotes.conn.get_value('Profile', self.user, 'login_after', ignore=True) or 0)\n\t\t\n\t\tif not (login_before or login_after):\n\t\t\treturn\n\t\t\t\n\t\tfrom webnotes.utils import now_datetime\n\t\tcurrent_hour = int(now_datetime().strftime('%H'))\n\t\t\t\t\n\t\tif login_before and current_hour > login_before:\n\t\t\twebnotes.msgprint('Not allowed to login after restricted hour', raise_exception=1)\n\n\t\tif login_after and current_hour < login_after:\n\t\t\twebnotes.msgprint('Not allowed to login before restricted hour', raise_exception=1)", "def hour(self) -> int:\r\n return self._hour", "def hour(self) -> int:\r\n return self._hour", "def run_hour(self) -> int:\n return self.timestamp.hour", "def is_night_hours(time):\n if time == datetime.time(22, 0, 0, 0):\n return True\n return time.hour in [22, 23, 0, 1, 2, 3, 4, 5]", "def format_hours(self, data):\n return unicode('%f' % data).rstrip('0').rstrip('.')", "def sum_hours(self):\n return sum([le.num_hrs for le in self.log_entries])", "def _get_number_of_hours(self, date_from, date_to, istirahat):\n\n DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n from_dt = datetime.strptime(date_from, DATETIME_FORMAT)\n to_dt = datetime.strptime(date_to, DATETIME_FORMAT)\n timedelta = to_dt - from_dt\n diff_day =(float(timedelta.seconds) / 3600) - istirahat\n return diff_day", "def give_raise(self):\r\n self.hourly_pay = 12.00", "def averageTime(self):\n \n pass", "def hour():\r\n\r\n date = datetime.datetime.now()\r\n hours = date.hour\r\n minute = date.minute\r\n\r\n return hours, minute", "def duration(self):\n delta = self.occurrence.end - self.occurrence.start\n real_hours = delta.days * 24 + delta.seconds / (60.0 * 60.0)\n\n adjusted_hours = attendance_settings.HOUR_MULTIPLIER * real_hours\n\n return adjusted_hours", "def seconds2hours(time_in_seconds):\n seconds_since_midnight = np.mod(time_in_seconds, SECONDS_PER_DAY)\n fraction_hour = seconds_since_midnight/SECONDS_PER_HOUR\n if fraction_hour[-1] == 0:\n fraction_hour[-1] = 24\n return fraction_hour", "def hourly(self, start_time: str = \"now\", end_time: Optional[str] = None,\n fields: List[str] = list()) -> dict:\n end_time = end_time or str(pendulum.parse(start_time).add(hours=108))\n query = {\n \"start_time\": start_time,\n \"end_time\": end_time,\n \"fields\": fields or self.fields\n }\n return self.call(\"weather/forecast/hourly\", query)", "def input_starting_hours(sheet_name, workbook_path):\n question = [\n inquirer.List('starting_hours',\n message=f'Have you already invested time into {sheet_name}?',\n choices=['Yes', 'No'],\n default='No',\n )\n ]\n answer = inquirer.prompt(question)\n choice = answer['starting_hours']\n if choice == 'Yes':\n starting_hours = int(input(f\"How many hours have you already invested in 
{sheet_name}? \"))\n wb = openpyxl.load_workbook(workbook_path)\n ws = wb.active\n ws['A2'] = starting_hours\n next_level = 1\n level = 0\n while starting_hours >= (next_level ** 2):\n starting_hours -= next_level ** 2\n level += 1\n next_level += 1\n ws['B2'] = level\n ws['B4'] = next_level\n ws['A4'] = next_level ** 2\n ws['C2'] = starting_hours\n ws['C4'] = (next_level ** 2) - starting_hours\n wb.save(workbook_path)", "def calculate_working_hours(logs, check_in_out_type, working_hours_calc_type):\n\ttotal_hours = 0\n\tin_time = out_time = None\n\tif check_in_out_type == 'Alternating entries as IN and OUT during the same shift':\n\t\tin_time = logs[0].time\n\t\tif len(logs) >= 2:\n\t\t\tout_time = logs[-1].time\n\t\tif working_hours_calc_type == 'First Check-in and Last Check-out':\n\t\t\t# assumption in this case: First log always taken as IN, Last log always taken as OUT\n\t\t\ttotal_hours = time_diff_in_hours(in_time, logs[-1].time)\n\t\telif working_hours_calc_type == 'Every Valid Check-in and Check-out':\n\t\t\tlogs = logs[:]\n\t\t\twhile len(logs) >= 2:\n\t\t\t\ttotal_hours += time_diff_in_hours(logs[0].time, logs[1].time)\n\t\t\t\tdel logs[:2]\n\n\telif check_in_out_type == 'Strictly based on Log Type in Employee Checkin':\n\t\tif working_hours_calc_type == 'First Check-in and Last Check-out':\n\t\t\tfirst_in_log_index = find_index_in_dict(logs, 'log_type', 'IN')\n\t\t\tfirst_in_log = logs[first_in_log_index] if first_in_log_index or first_in_log_index == 0 else None\n\t\t\tlast_out_log_index = find_index_in_dict(reversed(logs), 'log_type', 'OUT')\n\t\t\tlast_out_log = logs[len(logs)-1-last_out_log_index] if last_out_log_index or last_out_log_index == 0 else None\n\t\t\tif first_in_log and last_out_log:\n\t\t\t\tin_time, out_time = first_in_log.time, last_out_log.time\n\t\t\t\ttotal_hours = time_diff_in_hours(in_time, out_time)\n\t\telif working_hours_calc_type == 'Every Valid Check-in and Check-out':\n\t\t\tin_log = out_log = None\n\t\t\tfor log in logs:\n\t\t\t\tif in_log and out_log:\n\t\t\t\t\tif not in_time:\n\t\t\t\t\t\tin_time = in_log.time\n\t\t\t\t\tout_time = out_log.time\n\t\t\t\t\ttotal_hours += time_diff_in_hours(in_log.time, out_log.time)\n\t\t\t\t\tin_log = out_log = None\n\t\t\t\tif not in_log:\n\t\t\t\t\tin_log = log if log.log_type == 'IN' else None\n\t\t\t\telif not out_log:\n\t\t\t\t\tout_log = log if log.log_type == 'OUT' else None\n\t\t\tif in_log and out_log:\n\t\t\t\tout_time = out_log.time\n\t\t\t\ttotal_hours += time_diff_in_hours(in_log.time, out_log.time)\n\treturn total_hours, in_time, out_time", "def test_number_of_hours_util(self):\n self.assertEqual(20, number_of_hours(50, 40))", "def get_time_delta_in_hours(start, end):\n dhour = end.hour - start.hour\n dmin = end.minute - start.minute\n dsec = end.second - start.second\n dtime = timedelta(hours=dhour, minutes=dmin, seconds=dsec) # NOTE rounds to nearest second\n # print start, end, dtime\n return float(dtime.seconds) / (60*60)", "def test_start_end_hour():\n # sh = None\n # eh = None\n # data = None\n # result = makesky.start_end_hour(sh, eh, data)\n pass", "def getHour(self):\n return _libsbml.Date_getHour(self)", "def _event_time_changed(self, sender, obj, **kwargs):\n handle_event_time_update(obj)", "def totalHours(path):\n total = 0\n start = 0\n active = False\n with open(path, 'r') as f:\n reader = csv.reader(f)\n for row in reader:\n if row[1] == 't':\n total += float(row[2])\n else:\n if active:\n if row[1] == 's':\n total += float(row[0]) - start\n active = False\n else:\n if row[1] == 
'a':\n start = float(row[0])\n active = True\n final = row\n if active:\n total += time.time() - float(final[0])\n active = False\n return \"%.2f\" % (total / 3600)", "def active_hours(self, active_hours):\n\n self._active_hours = active_hours", "def min_to_hours(value):\n\tminutes = int(value) \n\thours = minutes//60\n\tminutes = minutes%60\n\tresult = ''\n\tif hours > 0:\n\t\tresult = '%dh %dm' % (hours, minutes)\n\telse:\n\t\tresult = '%dm' % (minutes)\n\n\treturn result", "def min_myproxy_hours(self):\n return int(self.__get_option('min_myproxy_valid_hours'))", "def min_voms_proxy_hours(self):\n return int(self.__get_option('min_voms_proxy_valid_hours'))", "def during_operating_hours(dry_run=False, starthour=None, endhour=None):\n if starthour is None:\n starthour = get_nightly_start_time()\n if endhour is None:\n endhour = get_nightly_end_time()\n ensure_tucson_time()\n hour = time.localtime().tm_hour\n\n if endhour < starthour:\n return dry_run or (hour < endhour) or (hour > starthour)\n else:\n return dry_run or ( (hour < endhour) and (hour > starthour) )", "def get_24h(self):\n records = self.level_model.get_for_period(1)\n self.set_attributes(records, '24 hours')", "def _sum_hours(time_entries):\n hours = sum(map(lambda x: x.duration, time_entries), 0)\n hours = round(hours, DECIMALS_HOUR)\n return hours", "def showClock(hour: int, min: int):\n pass", "def is_peak_hours(time):\n if not 1 <= time.isoweekday() <= 5:\n return False\n if time.hour in [6, 7, 8, 18, 19, 20]:\n return True\n\n return False", "def get_total_hours(model):\n total_hours = [v.contributed_hours for k, v in model.schedule.agents_by_type['Customer'].items()]\n return round(float(np.sum(total_hours)), 2)", "def get_hours_of_interest(current_time, hours=None, add_weekend_hour=True):\n\n if hours is None: # pragma: no cover\n hours = [8, 12, 18]\n else:\n hours = list(hours)\n\n current_hour = current_time.hour\n\n if add_weekend_hour and current_time.weekday() in [4, 5]:\n hours.append(22)\n\n hours_of_interest = []\n\n hours = sorted(hours)\n for n in range(len(hours)):\n if current_hour + 1 < hours[n]:\n hours_of_interest = hours[n:]\n break\n\n logging.debug('Hours of interest: %s', hours_of_interest)\n return hours_of_interest" ]
[ "0.67112166", "0.6592598", "0.6250287", "0.61557996", "0.61208427", "0.594246", "0.59240556", "0.5915791", "0.59038764", "0.5896075", "0.5838429", "0.58366513", "0.5824198", "0.5824198", "0.5824198", "0.5824198", "0.5765874", "0.5749549", "0.57289326", "0.57184255", "0.5690366", "0.5653519", "0.5641949", "0.56209594", "0.561421", "0.55709803", "0.55559236", "0.5554775", "0.5543168", "0.5543168", "0.5543168", "0.5535974", "0.5528796", "0.5526552", "0.551726", "0.5498021", "0.549544", "0.54901105", "0.54885936", "0.5488122", "0.54740834", "0.546918", "0.5460151", "0.54505765", "0.54436547", "0.54340607", "0.5430347", "0.5418548", "0.5395452", "0.5389011", "0.5388846", "0.536866", "0.5366973", "0.5358724", "0.5350912", "0.5340173", "0.5327318", "0.5322071", "0.53102565", "0.53088826", "0.53007", "0.5295578", "0.52832735", "0.5282363", "0.5271224", "0.5266864", "0.5264336", "0.5259859", "0.5259523", "0.5259523", "0.5247479", "0.5235605", "0.52301735", "0.5224861", "0.52198976", "0.52171683", "0.5190556", "0.51693743", "0.5164779", "0.5157332", "0.51467985", "0.5141959", "0.513642", "0.5135333", "0.51279485", "0.51275027", "0.51253736", "0.5115758", "0.5114609", "0.51130503", "0.51054853", "0.5102883", "0.50996155", "0.5098217", "0.5090173", "0.5090036", "0.50806475", "0.5077292", "0.50734794", "0.5073115" ]
0.55416137
31
Given a urlsafe version of an Album key, get the actual key
def get_album_key_by_keystr(keystr): attr_err = 'Keystrings must be an instance of base string, recieved: %s' % keystr kind_err = 'Expected urlsafe keystr for kind %s but received keystr for kind %s instead.' if not keystr or not isinstance(keystr, basestring): raise RuntimeError(attr_err) key = ndb.Key(urlsafe=keystr) if not key.kind() == PHOTOALBUM_KIND: raise RuntimeError(kind_err % (PHOTOALBUM_KIND, key.kind())) return key
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_album_key(slug):\n err = 'Series slug must be defined and of of type basestring'\n\n if not slug or not isinstance(slug, basestring):\n raise RuntimeError(err)\n\n return ndb.Key(PHOTOALBUM_KIND, slug)", "def get_key_from_url(file_url):\t\n\tparts = urlparse(file_url)\n\tbucket_name = get_bucket_name_from_url(file_url)\n\tkey = parts.path.replace(\"/\" + bucket_name + \"/\", \"\")\n\treturn key", "def as_key(key):\n return key.lstrip('/').rstrip('/')", "def key_id(cls, url: str):\r\n ...", "def create_key_from_url(raw_url):\n org_url = urllib2.urlparse.urlparse(raw_url)\n new_key = ''\n net_location = org_url.netloc\n netloc_list = net_location.split(\".\")\n netloc_list.reverse()\n for part in netloc_list:\n new_key += '%s.' % part\n new_key = new_key[:-1] # Removes trailing period\n new_key = new_key + org_url.path \n return new_key", "def get_album_by_slug(slug):\n\n album_key = get_album_key(slug)\n album = album_key.get()\n return album", "def _extract_spreadsheet_key_from_url(url):\r\n result = url\r\n\r\n if 'key=' in url:\r\n result = url.split('key=')[-1].split('#')[0].split('&')[0]\r\n\r\n return result", "def key(key):\n return key", "def _get_key_url(self, key):\n urls = self.get_URLS(key)\n\n if len(urls) == 1:\n return urls[0]\n else: # multiple\n # TODO: utilize cache to check which archives might already be\n # present in the cache.\n # Then if not present in the cache -- check which are present\n # locally and choose that one to use\n if self._last_url and self._last_url in urls:\n return self._last_url\n else:\n return urls[0] # just the first one", "def resolve_key(obj, _):\n return obj.key.decode()", "def _get_raw_key(self, key_id):", "def parse_key(key_id):\n\tcomment = get_key_comment(key_id)[0]\n\tregex = re.compile(\".*?\\\\((.*?)\\\\)\")\n\tcomment_bits = re.findall(regex, comment)[0].split(' ')\n\tif comment_bits[0] == sha256(comment_bits[1]).hexdigest():\n\t\treturn comment_bits[1]", "def parse_key(key_id):\n comment = get_key_comment(key_id)[0]\n regex = re.compile(\".*?\\\\((.*?)\\\\)\")\n comment_bits = re.findall(regex, comment)[0].split(' ')\n if comment_bits[0] == sha256(comment_bits[1]).hexdigest():\n return comment_bits[1]", "def key_for_bucket(self, key):\n\n try:\n return int(key[0] // 16), int(key[1] // 16), int(key[2] // 16)\n except ValueError:\n return KeyError(\"Key %s isn't usable here!\" % repr(key))", "def key_for_bucket(self, key):\n\n try:\n return int(key[0] // 16), int(key[1] // 16)\n except ValueError:\n return KeyError(\"Key %s isn't usable here!\" % repr(key))", "def _get_key_name(self, name):\n base_path = force_text(self.location)\n final_path = urljoin(base_path + \"/\", name)\n name = os.path.normpath(final_path.lstrip('/'))\n\n if six.PY2:\n name = name.encode('utf-8')\n return name", "def _getNDBKey(websafe_key_to_get):\n return ndb.Key(urlsafe=websafe_key_to_get)", "def get_store_key(asset):\n return '.'.join([asset.name, asset.uid, asset.ext])", "def _get_akey_afile(self, key):\n url = self._get_key_url(key)\n return self._parse_url(url)[:2] # skip size", "def _resolve_apikey(url: str, apikey: Optional[str]) -> Tuple[str, str]:\n # Even though the async api doesn't support apikey query parameter,\n # for ease of use support providing it as query parameter in the url.\n # authorization is always done via Authorization header\n url, params = UrlManipulation.separate_query_params(url, (\"apikey\",))\n try:\n apikey = params[\"apikey\"][0]\n except KeyError:\n pass\n\n if apikey is None:\n raise ValueError(\"apikey not 
defined\")\n\n return url, apikey", "def decode_key_from_mongo(fieldname):\r\n return urllib.unquote(fieldname)", "def get_apiauth_object_by_key(key):\n return model.APIAuth.query.filter_by(key=key).first()", "def parse_camera_name_from_object_key(object_key):\n first_parts = object_key.split(\"/\")\n return first_parts[1]", "def get_key_id(self):", "def get_api_key(api_key):\n api.get(api_key)", "def fname(key):\n return key.rsplit(\"/\", 1)[-1]", "def getKey(self, namespace, ns_key):\n namespace = self._fixNS(namespace)\n if namespace == BARE_NS:\n return ns_key\n\n ns_alias = self.namespaces.getAlias(namespace)\n\n # No alias is defined, so no key can exist\n if ns_alias is None:\n return None\n\n if ns_alias == NULL_NAMESPACE:\n tail = ns_key\n else:\n tail = '%s.%s' % (ns_alias, ns_key)\n\n return 'openid.' + tail", "def get_safe_part(key):\r\n version = key[0]\r\n # This function should only be called on versioned keys.\r\n assert version\r\n\r\n # Find the md5 hash part.\r\n c_link_key = key[1]\r\n for key_element in c_link_key[1:]:\r\n if (isinstance(key_element, basestring)\r\n and key_element.startswith('md5:')):\r\n md5 = key_element[4:]\r\n break\r\n\r\n return key[0] + (md5, )", "def strkey(item):\n return '%s:%s:%s' % (item['group_id'], item['artifact_id'], item['version'])", "def sub_key(dirname):\n return SUB_PREFIX + dirname", "def getKey(self, key):\n return self.BUCKET.get_key(key)", "def find_album_playlist(data):\n\n return data['album'].lower() + '.m3u'", "def _decode_key(self, key):\n return key if not key or isinstance(key, str) else key.decode()", "def key(self):\n return self._key.decode('utf-8')", "def fullkey(self, key):\n if len(self.basekey) > 0:\n return \"{}:{}\".format(self.basekey, key)\n else:\n return key", "def gallery_key():\n return ndb.Key('Gallery', 'All')", "def root_given_key(prob_key):\n root = ''\n for i, info in enumerate(prob_key):\n if i != 0:\n root += '_'\n root += str(info)\n return root.replace('.', '_')", "def prepare_key(self, key):\n return smart_str(key)", "def test_extract_spreadsheet_key_from_url(self):\r\n # Pass various URLs with different key/value combos.\r\n obs = _extract_spreadsheet_key_from_url(self.url1)\r\n self.assertEqual(obs, self.spreadsheet_key)\r\n\r\n obs = _extract_spreadsheet_key_from_url(self.url2)\r\n self.assertEqual(obs, self.spreadsheet_key)\r\n\r\n obs = _extract_spreadsheet_key_from_url(self.url3)\r\n self.assertEqual(obs, self.spreadsheet_key)\r\n\r\n # Pass a key directly.\r\n obs = _extract_spreadsheet_key_from_url(self.spreadsheet_key)\r\n self.assertEqual(obs, self.spreadsheet_key)\r\n\r\n # Pass 'key=<key>'.\r\n obs = _extract_spreadsheet_key_from_url('key=' + self.spreadsheet_key)\r\n self.assertEqual(obs, self.spreadsheet_key)", "def get_store_key(asset, variation):\n return '.'.join([\n asset.name,\n asset.uid,\n variation.name,\n variation.version,\n variation.ext\n ])", "def key_url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key_url\")", "def key_url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key_url\")", "def key_from_ireq(ireq):\n if ireq.req is None and ireq.link is not None:\n return str(ireq.link)\n else:\n return key_from_req(ireq.req)", "def GetKeyByPath(self, key_path):", "def decode_key(key: str) -> Tuple[int, int]:\n try:\n mod, exp = key.split(\".\")\n except ValueError:\n raise ValueError(f\"`{key}` is not a valid key\")\n\n return (\n int.from_bytes(base64.urlsafe_b64decode(mod), config.BYTEORDER),\n int.from_bytes(base64.urlsafe_b64decode(exp), 
config.BYTEORDER, signed=True),\n )", "def parse_quicklook_key(key: str) -> Dict[str, Any]:\n\n # Example input\n # CBERS4/AWFI/155/135/CBERS_4_AWFI_20170515_155_135_L2/CBERS_4_AWFI_20170515_155_135.jpg\n\n match = re.search(\n r\"(?P<satellite>\\w+)/(?P<camera>\\w+)/\"\n r\"(?P<path>\\d{3})/(?P<row>\\d{3})/(?P<scene_id>\\w+)/\",\n key,\n )\n assert match, \"Could not match \" + key\n return {\n \"satellite\": match.group(\"satellite\"),\n \"camera\": match.group(\"camera\"),\n \"path\": match.group(\"path\"),\n \"row\": match.group(\"row\"),\n \"scene_id\": match.group(\"scene_id\"),\n \"collection\": match.group(\"satellite\") + match.group(\"camera\"),\n }", "def get_album(self) -> Optional[str]:\n return self.album", "def _extract_bucket_key(s3_uri: str)->tuple:\n s3_regex=\"^s3://([a-z0-9.-]+)/(.*)$\"\n search =re.search(s3_regex, s3_uri)\n if search is None:\n raise Error(\"Invalid s3 uri: {}\".format(s3_uri))\n return search.groups()", "def build_key(key):\n return os.path.join(PREFIX, key)", "def get(id):\n repo = KeyRepository(getDb())\n try:\n key = repo.findOne(id)\n except DBException:\n return {'message': 'Key id is invalid'}, 400\n\n if key == None:\n return {'message': 'Key has not been found'}, 404\n else:\n return base64.b64decode(key.publicKey)", "def get_by_urlsafe(urlsafe, model):\n try:\n key = ndb.Key(urlsafe=urlsafe)\n except TypeError:\n raise endpoints.BadRequestException('Invalid Key')\n except Exception as e:\n if e.__class__.__name__ == 'ProtocolBufferDecodeError':\n raise endpoints.BadRequestException('Invalid Key')\n else:\n raise\n\n entity = key.get()\n if not entity:\n return None\n if not isinstance(entity, model):\n raise ValueError('Incorrect Kind')\n return entity", "def _encode_key(self, key: str) -> str:\n return key", "def _shorten_key(telstate, key):\n for prefix in telstate.prefixes:\n if key.startswith(prefix):\n return key[len(prefix):]\n return ''", "def _get_doc_key(self, doc_id, key):\n cursor = self.connection.cursor()\n cursor.execute(\n \"SELECT {} FROM documents WHERE id = ?\".format(key),\n (normalize(doc_id),)\n )\n result = cursor.fetchone()\n cursor.close()\n return result if result is None else result[0]", "def _key_hash(self, key):\n\n split_key = key.strip(' ').split(' ')[1]\n return int(split_key)", "def get_album(album_id):\n return query_single(album_id, Album, album_schema)", "def to_uri(bucket: str, key: str) -> str:\n return f's3://{bucket}/{key}'", "def champion_key_from_id(champion_id):\n return champions[\"data\"][str(champion_id)][\"key\"]", "def decode(self, shortUrl: str) -> str:\n try:\n return self.bucket.get(shortUrl)\n except:\n return None", "def proper_key(key, klen):\n ckey = \"\"\n if len(key) < klen:\n lmulti = math.floor(klen/len(key))\n lmod = klen % len(key)\n ckey = key * int(lmulti) + key[:lmod]\n elif len(key) > klen:\n ckey = key[:klen]\n else:\n ckey = key\n return ckey", "def safe_key(key, key_prefix, version):\r\n\r\n # Clean for whitespace and control characters, which\r\n # cause memcache to raise an exception\r\n key = cleaned_string(key)\r\n key_prefix = cleaned_string(key_prefix)\r\n version = cleaned_string(version)\r\n\r\n # Attempt to combine the prefix, version, and key\r\n combined = \":\".join([key_prefix, version, key])\r\n\r\n # If the total length is too long for memcache, hash it\r\n if len(combined) > 250:\r\n combined = fasthash(combined)\r\n\r\n # Return the result\r\n return combined", "def _makeInternalIdentifier(self, prefix, key):\n\n return '_:' + hashlib.sha1(\n 
('fb'+prefix+'key'+key).encode('utf-8')).hexdigest()[1:20]", "def get_poll_key(poll):\n return hashlib.sha224(poll.get_absolute_url()).hexdigest()", "def _GetKeyString(self):", "def _GetKeyString(self):", "def _get_key_id(self, user_obj):\n handle = user_obj.handle\n normalised_host = self.normalise_hostname(self._host_name)\n return '{}#main-key'.format(self._build_local_actor_url(handle,\n normalised_host))", "def get_key_for_path(cls, path):\n key = hashlib.sha224(path).hexdigest()\n return 'flexible_page_url_{}'.format(key)", "def _extract_immediate_prefix(obj_key:str)->str:\n immed_prefix = \"\"\n if len(obj_key.split(\"/\")) > 1:\n immed_prefix = obj_key.split(\"/\")[-2]\n \n return immed_prefix", "def dal_get(key):\n global store\n return store.get(urllib.quote(key))", "def album(self, q, page=None):\r\n return self.get('album', q, page)", "def GetRootKey(self):", "def _event_key(cls, thing_event):\n\n return thing_event.thing.url_name, thing_event.url_name", "def asset_db_key(location):\r\n return location.to_deprecated_son(tag=XASSET_LOCATION_TAG, prefix='_id.')", "def key(param: str):\n if re.match(r'^[A-Z_]+$', param):\n return 'KEY_' + param\n return 'KEY_' + re.sub(r'([a-z]|[A-Z]{2,})([A-Z]|\\d$)', r'\\1_\\2', param).upper()", "def GetKey(self, version_number):\n return self.dict[str(version_number)]", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")" ]
[ "0.7271961", "0.64886624", "0.6453254", "0.62893945", "0.6282472", "0.62622005", "0.6248456", "0.62307674", "0.61798584", "0.6131501", "0.60555077", "0.60446566", "0.60090566", "0.59870636", "0.5964662", "0.5961178", "0.59544635", "0.5880653", "0.5861731", "0.58558726", "0.58508784", "0.58307904", "0.58287644", "0.5804643", "0.57614213", "0.57265276", "0.57227707", "0.5697038", "0.5687968", "0.56814444", "0.5664947", "0.56557286", "0.5647611", "0.562667", "0.5621664", "0.56205857", "0.5609431", "0.56016624", "0.556049", "0.55515575", "0.55415624", "0.55415624", "0.55359143", "0.55350953", "0.55067605", "0.5505541", "0.55027866", "0.5502565", "0.5471904", "0.5468141", "0.54674697", "0.5465883", "0.5454954", "0.5444503", "0.54410523", "0.54219425", "0.54200435", "0.541799", "0.5416974", "0.541431", "0.5412125", "0.540967", "0.5407834", "0.54024285", "0.54024285", "0.5396643", "0.5395475", "0.53889626", "0.53827786", "0.53675395", "0.53649", "0.53622115", "0.5356064", "0.53558564", "0.5352148", "0.5351561", "0.5351561", "0.5351561", "0.5351561", "0.5351561", "0.5351561", "0.5351561", "0.5351561", "0.5351561", "0.5351561", "0.5351561", "0.5351561", "0.5351561", "0.5351561", "0.5351561", "0.5351561", "0.5351561", "0.5351561", "0.5351561", "0.5351561", "0.5351561", "0.5351561", "0.5351561", "0.5351561", "0.5351561" ]
0.76117
0
Create a ndb.Key given an Album slug
def get_album_key(slug): err = 'Series slug must be defined and of of type basestring' if not slug or not isinstance(slug, basestring): raise RuntimeError(err) return ndb.Key(PHOTOALBUM_KIND, slug)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_key(cls, song_id):\n return ndb.Key(cls, song_id)", "def get_album_key_by_keystr(keystr):\n attr_err = 'Keystrings must be an instance of base string, recieved: %s' % keystr\n kind_err = 'Expected urlsafe keystr for kind %s but received keystr for kind %s instead.'\n if not keystr or not isinstance(keystr, basestring):\n raise RuntimeError(attr_err)\n\n key = ndb.Key(urlsafe=keystr)\n if not key.kind() == PHOTOALBUM_KIND:\n raise RuntimeError(kind_err % (PHOTOALBUM_KIND, key.kind()))\n\n return key", "def get_album_by_slug(slug):\n\n album_key = get_album_key(slug)\n album = album_key.get()\n return album", "def make_album(artist_name, album_title): \n music_album = {\n 'Artist': artist_name.title(),\n 'Album': album_title.title()\n }\n return music_album", "def make_album(artist, title):\n album_dict = {\n 'artist': artist.title(),\n 'title': title.title(),\n }\n return album_dict", "def genre_key(genre_name=DEFAULT_GENRE):\n return ndb.Key('Genre', genre_name.lower())", "def blog_key(blog_name=DEFAULT_BLOG_NAME):\n return ndb.Key('Blog', blog_name)", "def make_album(artist_name, album_title, track_number=''):\n album = {'artist': artist_name,\n 'album title': album_title,\n }\n if track_number:\n album['track number'] = track_number\n return album", "def key_id(cls, url: str):\r\n ...", "def cmd_album_create(client, args):\n fields = data_fields(args, client.allowed_album_fields)\n album = client.create_album(fields)\n generate_output({'album': album})", "def gallery_key():\n return ndb.Key('Gallery', 'All')", "def create_key(cls, topic, entry_id):\n\t\treturn db.Key.from_path(\n\t\t\t\tFeedRecord.kind(),\n\t\t\t\tFeedRecord.create_key_name(topic),\n\t\t\t\tcls.kind(),\n\t\t\t\tget_hash_key_name(entry_id))", "def post_key(post_name=DEFAULT_POST_NAME):\n return ndb.Key('Post', post_name)", "def make_album(artist_name, album_title, tracks=0):\n album = {'artist': artist_name.title(), 'album': album_title.title(),}\n if tracks:\n album['tracks'] = tracks\n return album", "def create_key(cls, topic):\n\t\treturn datastore_types.Key.from_path(cls.kind(), utils.get_hash_key_name(topic))", "def _make_key(self, extra_prefix, key):\n if extra_prefix:\n return \"-\".join((self.prefix, extra_prefix, key))\n else:\n return \"-\".join((self.prefix, key))", "def _make_key(self, extra_prefix, key):\n if extra_prefix:\n return \"-\".join((self.prefix, extra_prefix, key))\n else:\n return \"-\".join((self.prefix, key))", "def build_key(model, id):\n return \"{}.{}\".format(model.__name__, id)", "def get_or_create(cls, key, urlsafe=False, **kwargs):\n if urlsafe:\n key = ndb.Key(urlsafe=key)\n ent = key.get()\n if ent is not None:\n return (ent, False) # False meaning \"not created\"\n ent = cls(**kwargs)\n ent.key = key\n ent.put()\n return (ent, True) # True meaning \"created\"", "def keyify(content_type_pk, pk):\n return '%s:%s' % (content_type_pk, pk)", "def make_album(artist_name, album_title, num_songs=None):\n album = {\n 'name': artist_name,\n 'album': album_title,\n 'num_songs': num_songs,\n }\n\n # This will not work!\n# album['name'] = artist_name\n# album['album'] = album_title\n# album['num_songs'] = num_songs\n\n return album", "def set_album(audio: EasyID3, album):\r\n audio['album'] = album\r\n audio.save()", "def make_album1(artist_name, album_title, track_number=''):\n album = {'artist': artist_name,\n 'album title': album_title,\n }\n if track_number:\n album['track number'] = track_number\n return album", "def add_contributor_album(slug, username):\n contrib = 
Contributor.get(username)\n album = Album.get(slug)\n ContributorAlbum(slug=album.slug, username=contrib.username).save()", "def _makeInternalIdentifier(self, prefix, key):\n\n return '_:' + hashlib.sha1(\n ('fb'+prefix+'key'+key).encode('utf-8')).hexdigest()[1:20]", "def create_key ():", "def make_album(artist_name, album_title, no_of_songs=None):\n if no_of_songs:\n album = {'artist': artist_name, 'title': album_title, 'songs': no_of_songs}\n else:\n album = {'artist': artist_name, 'title': album_title}\n \n return album", "def make_album(name,album_name,song_num=''):\r\n\tmusic_album={'name':name.title(),'album_name':album_name}\r\n\tif song_num:\r\n\t\tmusic_album['song_num']=song_num\r\n\treturn(music_album)", "def create(self, identity, record=None, data=None, **kwargs):\n self.set_slug(record, data[\"slug\"])", "def test_get_album_id_regular_album(self):\n album_id = self.add_album(artist='Artist', album='Album')\n self.assertNotEqual(album_id, 0)\n track = Track(artist='Artist', album='Album', title='Title')\n track_album_id = self.app.set_album_id(track)\n self.assertEqual(track_album_id, album_id)\n self.assertEqual(track.album_id, album_id)", "def make_album(artist_name, album_title, songs=None):\n album = {'artist': artist_name, 'title': album_title}\n if songs:\n album['songs'] = songs\n return album", "def make_new_key(idx, key, d):\n\n new_key = \"%s_%d\" % (key, idx)\n if new_key in d:\n return make_new_key(idx + 1, key, d)\n return new_key", "def _make_id(self,kwargs, key=\"nid\"):\n\t\teid = \"\"\n\t\tif kwargs.get(\"host\", False):\n\t\t\teid = kwargs.get('host', \"\") + \"-\" + kwargs.get(key)\n\t\telse:\n\t\t\teid = kwargs.get(key)\n\t\treturn eid", "def _create_key(item, duplicate_sources):\n if item[\"nom\"] not in duplicate_sources:\n return item[\"nom\"]\n dateref = item[\"date_ref\"]\n year = re.search(r\"\\d{4}\", dateref).group(0)\n return f\"{item['nom']}_{year}\"", "def id_to_key(cls,id):\n try:\n id = long(id)\n except ValueError:\n pass # it was a string, not an int.\n return ndb.Key(cls._get_kind(),id)", "def create(cls, subdomain, key, **kwargs):\n key_name = subdomain + ':' + key\n return cls(key_name=key_name, subdomain=subdomain, **kwargs)", "def make_album(artist, album, tracks=\"\"):\n retval = {'artist': artist, 'album': album}\n if tracks:\n retval['tracks'] = tracks\n return retval", "def make_album(artist, title, tracks=0):\n album_dict = {\n 'artist': artist.title(),\n 'title': title.title(),\n }\n if tracks:\n album_dict['tracks'] = tracks\n return album_dict", "def make_album(artist, title, tracks=0):\n album_dict = {\n 'artist': artist.title(),\n 'title': title.title(),\n }\n if tracks:\n album_dict['tracks'] = tracks\n return album_dict", "def make_key(*args, **kwargs) -> Hashable:\n if len(args) == 1 and isinstance(args[0], (int, str)):\n return args[0]\n if kwargs:\n args = sum(kwargs.items(), (*args, _KWD_MARK))\n return _HashedSeq(args)", "def key_from_path(db_table, value):\r\n if isinstance(value, (int, long)):\r\n ValidateInteger(value, 'id')\r\n return Key.from_path(db_table, value)", "def create_slug(self):\n slug = slugify(self.title)\n new_slug = slug\n n = 1\n while Article.objects.filter(slug=new_slug).exists():\n new_slug = '{}-{}'.format(slug, n)\n n += 1\n\n return new_slug", "def portfolio_key(name = 'default'):\n return ndb.Key('portfolio', name)", "def createkey(*args): # {{{2\n return '-'.join(map(simplifyname, args))", "def make_album(artist, title, songs=None):\n album = {}\n album['artist'] = artist\n album['title'] = 
title\n if songs:\n album['songs'] = songs\n return album", "def map_key(map_name=DEFAULT_MAP_NAME):\n return ndb.Key('PhotoMap', map_name)", "def make_album(artist,title,tracks=''):\n album = {\"By\":artist,\"Titled\":title, \"Tracks\": tracks}\n return album", "def make_album(artist_name, album_title, tracks=''):\n album = {\"name\": artist_name, \"title\": album_title}\n if tracks:\n album[\"tracks\"] = tracks\n return album", "def guestbook_key(guestbook_name=DEFAULT_GUESTBOOK_NAME):\n return ndb.Key('Guestbook', guestbook_name)", "def guestbook_key(guestbook_name=DEFAULT_GUESTBOOK_NAME):\n return ndb.Key('Guestbook', guestbook_name)", "def guestbook_key(guestbook_name=DEFAULT_GUESTBOOK_NAME):\n return ndb.Key('Guestbook', guestbook_name)", "def unique_key_generator(instance):\n size = random.randint(30, 45)\n key = get_random_string(size=size)\n\n Klass = instance.__class__\n qs_exists = Klass.objects.filter(key=key).exists()\n if qs_exists:\n return get_unique_slug(instance)\n return key", "def sub_key(dirname):\n return SUB_PREFIX + dirname", "def image_key(image_name=DEFAULT_IMAGE_NAME):\n return ndb.Key('Image', image_name)", "def MakeKey(self, string, string_1, string_2):\n ...", "def catalog_key(catalog_name=DEFAULT_CATALOG_NAME):\n return ndb.Key('Catalog', catalog_name)", "def test_get_album_id_regular_and_various_album(self):\n var_album_id = self.add_album(artist='Various', album='Album')\n self.assertNotEqual(var_album_id, 0)\n reg_album_id = self.add_album(artist='Artist', album='Album')\n self.assertNotEqual(reg_album_id, 0)\n track = Track(artist='Artist', album='Album', title='Title')\n track_album_id = self.app.set_album_id(track)\n self.assertEqual(track_album_id, reg_album_id)\n self.assertEqual(track.album_id, reg_album_id)", "def add_album_with_contributor(title, username):\n album = Album(title=title) \n album.save()\n ContributorAlbum(slug=album.slug, username=username).save()\n return album", "def cmd_album_id(client, args):\n album = client.get_album(args.album_id)\n data = album.__dict__\n generate_output({'album': data})", "def make_asset_key(self, asset_type, path):\r\n raise NotImplementedError()", "def generate_song_id():\n return TinyIDGenerator(namespace=\"SGID\").generate_tinyid(run_in_transaction=ndb.in_transaction())", "def test_create_primary_key(self):\n assert self.tbl.primary_key == 'id'", "def make_album(name, title, tracks=\"\"):\n dict = {\n 'artist': name.title(),\n 'title': title.title(),\n }\n if tracks:\n dict['tracks'] = tracks\n return dict", "def make_album(name, title, tracks=\"\"):\n dict = {\n 'artist': name.title(),\n 'title': title.title(),\n }\n if tracks:\n dict['tracks'] = tracks\n return dict", "def make_album(artist, title):\n album = {}\n# print(\"\\nPlease give us information about an album.\")\n# artist = input(\"Please name the artist: \")\n# title = input(\"Please name the title of the album: \")\n album['artist'] = artist\n album['title'] = title\n # Alternatively\n# album = {\n# 'artist': artist,\n# 'title': title,\n# }\n return album", "def blog_key(name='default'):\n\n return db.Key.from_path('blogs', name)", "def blog_key(name = 'default'):\n return db.Key.from_path('blogs', name)", "def create_or_update(cls,key=None,**kwargs):\n if key:\n db = key.get()\n db.populate(**kwargs)\n else:\n db = cls(**kwargs)\n key = db.put()\n return key", "def create_entity(data: dict) -> str:\n new_uuid = str(uuid4())\n Entity.create(uuid=new_uuid, data=data[\"data\"])\n return new_uuid", "async def build_key(self, attribute, value, 
record_id='*'):\n self.key = '{var1}:{var2}:{var3}:{var4}:{var5}'.format(var1=record_id, var2=self.industry, var3=self.merchant,\n var4=attribute, var5=value)", "def make_album(artist_name, album_name, tracks=None):\n album_data = {'artist': artist_name, 'album': album_name}\n if tracks:\n album_data['tracks'] = tracks\n return album_data", "def insert_song(self, song_name: str, title: str, artist: str, file_hash: str, total_hashes: int) -> int:\n id = random.randint(1, 1000000000000)\n song = Songs(meta={'id': id}, song_name=song_name, song_title=title, artist=artist, file_sha1=file_hash, total_hashes=total_hashes)\n song.save()\n return id", "def test_get_album_id_various_album(self):\n album_id = self.add_album(artist='Various', album='Album')\n self.assertNotEqual(album_id, 0)\n track = Track(artist='Artist', album='Album', title='Title')\n track_album_id = self.app.set_album_id(track)\n self.assertEqual(track_album_id, album_id)\n self.assertEqual(track.album_id, album_id)", "def unique_key_generator(instance):\n size = random.randint(30, 45)\n key = random_string_generator(size=size)\n\n Klass = instance.__class__\n qs_exists = Klass.objects.filter(key=key).exists()\n if qs_exists:\n return unique_slug_generator(instance)\n return key", "def test_insert_simple(self):\n album = Album(artist='Artist', album='Album', album_type='ep',\n totaltracks=5, totalseconds=42, last_transform=3)\n pk = album.insert(self.app.db, self.app.curs)\n\n self.assertNotEqual(pk, None)\n self.assertNotEqual(pk, 0)\n self.assertEqual(self.get_album_count(), 1)\n album_row = self.get_album_by_id(pk)\n self.assertEqual(album_row['alartist'], 'Artist')\n self.assertEqual(album_row['alalbum'], 'Album')\n self.assertEqual(album_row['altype'], 'ep')\n self.assertEqual(album_row['totaltracks'], 5)\n self.assertEqual(album_row['totalseconds'], 42)\n self.assertEqual(album_row['lasttransform'], 3)", "def make_album_two(artist_name, album_title, number_of_songs= None): \n music_album = {'Artist': artist_name.title(),\n 'Album': album_title.title()}\n if number_of_songs:\n music_album['Number of Songs'] = number_of_songs\n return music_album", "def test_auto_slug(self):\n category = Category.objects.create()\n translation = CategoryTranslation.objects.create(\n name=\"Charter Schools\", category=category)\n self.assertEqual(category.slug, \"charter-schools\")", "def save(self, *args, **kwargs):\n self.slug = \"/\".join([\n slugify(__class__.__name__.lower()),\n settings.PK_PLACEHOLDER,\n slugify(self.name)\n ])\n super(__class__, self).save(*args, **kwargs)", "def create_key_from_url(raw_url):\n org_url = urllib2.urlparse.urlparse(raw_url)\n new_key = ''\n net_location = org_url.netloc\n netloc_list = net_location.split(\".\")\n netloc_list.reverse()\n for part in netloc_list:\n new_key += '%s.' 
% part\n new_key = new_key[:-1] # Removes trailing period\n new_key = new_key + org_url.path \n return new_key", "def create_api_key(sender, **kwargs):\r\n if kwargs.get('created') is True:\r\n ApiKey.objects.create(user=kwargs.get('instance'))", "def build_key(key):\n return os.path.join(PREFIX, key)", "def dbmodel_key(model_name=DATABASE_NAME):\n return ndb.Key('ModelDB', model_name)", "def get_datastore_key(model, pk):\n\n kind = get_top_concrete_parent(model)._meta.db_table\n return Key.from_path(kind, pk)", "def generate_identifier(sender, instance, **kwargs):\n identifier = Concept.create_identifier(instance.query)\n qs = Concept.objects.filter(identifier=identifier, lang=instance.lang)\n if instance.pk:\n qs = qs.exclude(pk=instance.pk)\n if qs.count() > 0:\n raise ValueError(\"Concept identifier conflict\")\n instance.identifier = identifier", "def item_slug(self, item, **kwargs):\n name = self.project_data.slug(item_name=item.name)\n\n if len(kwargs.keys()) > 0:\n name = '{name}{extra}'.format(name=name, extra='-'.join([unicode(i) for i in kwargs.values()]))\n\n return shortuuid.uuid(name=name)", "def test_short():\n key = 'A' * 241\n full_key = 'prefix:1:%s' % key\n assert full_key == make_key(key, 'prefix', 1)", "def _create_auto_key(self,identifier2organism={}):\n # when this is a locus in a dbwarehouse, abstract the genomedirname\n realdirname = os.path.realpath(self.dirname)\n if realdirname.find(\"/loci/\") > 0:\n key = os.path.basename(realdirname[0:realdirname.find(\"/loci/\")])\n if key: return key\n # if this point is reached, NOT a locus in dbwarehouse\n # check if we can map the gene's id to an organism ID\n if identifier2organism:\n for identifierpart,organism in identifier2organism.iteritems():\n if self.fref.find(identifierpart) == 0:\n # succesfull mapping\n return organism\n else:\n # mapping was not succesfull\n return self.fref\n else:\n return self.fref", "def uploaded_image_key(image_name=DEFAULT_IMAGE_NAME):\n return ndb.Key('Uploadeddata', image_name)", "def create_cache_key(cls, pk: Union[int, str]) -> str:\n if cls.__cache_key__:\n try:\n return cls.__cache_key__.format(**{cls.get_primary_key(): pk})\n except KeyError:\n pass\n raise NameError( # pramga: no cover\n 'The cache key is undefined or improperly defined in this model.'\n )", "def test_auto_unique_slug(self):\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\")\n self.assertEqual(story.slug, \"test-story\")\n story2 = create_story(title=\"Test Story\", summary=\"Test Summary 2\",\n byline=\"Test Byline 2\")\n self.assertEqual(story2.slug, \"test-story-2\")\n self.assertEqual(Story.objects.filter(slug=\"test-story\").count(), 1)", "def create_id():\n unique_id = UniqueId()\n unique_id.put()\n return unique_id.key().id()", "def create_id():\n unique_id = UniqueId()\n unique_id.put()\n return unique_id.key().id()", "def GenerateKey(self):\n self.key_name = self.key_name or str(uuid.uuid4())\n if self.key is None or not self.key.id():\n self.key = ndb.Key(self._get_kind(), self.key_name)\n return True\n return False", "def make_key(k, with_locale=True):\r\n key = encoding.smart_str('%s:%s' % (CACHE_PREFIX, k))\r\n if with_locale:\r\n key += encoding.smart_str(translation.get_language())\r\n # memcached keys must be < 250 bytes and w/o whitespace, but it's nice\r\n # to see the keys when using locmem.\r\n return hashlib.md5(key).hexdigest()", "def make_key(iden, *a, **kw):\n h = md5()\n\n def _conv(s):\n if isinstance(s, str):\n return s\n elif isinstance(s, 
unicode):\n return s.encode('utf-8')\n elif isinstance(s, (tuple, list)):\n return ','.join(_conv(x) for x in s)\n elif isinstance(s, dict):\n return ','.join('%s:%s' % (_conv(k), _conv(v))\n for (k, v) in sorted(s.iteritems()))\n else:\n return str(s)\n\n iden = _conv(iden)\n h.update(iden)\n h.update(_conv(a))\n h.update(_conv(kw))\n return '%s(%s)' % (iden, h.hexdigest())", "def initiate_new_key (self,key,index):\r\n\r\n #with shelf\r\n if self.using_shelf:\r\n\r\n\r\n self.key_dict[key] = {str(index)}\r\n\r\n #with database\r\n if self.using_database:\r\n\r\n value_tuple = (notebookname, key,)\r\n db_cursor.execute(\"INSERT OR REPLACE \"\r\n +\"INTO all_keys (keyword, notebook)\"\r\n +\" VALUES (?,?);\",\r\n value_tuple)\r\n value_tuple = (notebookname, key, str(index))\r\n db_cursor.execute(\"INSERT OR REPLACE \"\r\n +\"INTO keys_to_indexes\"\r\n +\" (notebook, keyword, note_index)\"\r\n +\" VALUES (?,?,?);\",\r\n value_tuple)", "def get_store_key(asset):\n return '.'.join([asset.name, asset.uid, asset.ext])", "def delete_album_by_slug(slug): \n album = get_album_by_slug(slug)\n [x.delete() for x in ContributorAlbum.scan({\"slug\": condition.EQ(album.slug)})]\n album.delete()", "def test_key_kind(self):\r\n parent = ParentKind.objects.create(pk=1)\r\n child = ChildKind.objects.create(\r\n pk=2, parent=parent, parents=[parent.pk])\r\n self.assertEqual(child.parent.pk, parent.pk)\r\n self.assertEqual(child.parents[0], parent.pk)\r\n\r\n from google.appengine.api.datastore import Get\r\n from google.appengine.api.datastore_types import Key\r\n parent_key = Key.from_path(parent._meta.db_table, 1)\r\n child_key = Key.from_path(child._meta.db_table, 2)\r\n parent_entity = Get(parent_key)\r\n child_entity = Get(child_key)\r\n parent_column = child._meta.get_field('parent').column\r\n parents_column = child._meta.get_field('parents').column\r\n self.assertEqual(child_entity[parent_column], parent_key)\r\n self.assertEqual(child_entity[parents_column][0], parent_key)", "def save(self, *args, **kwargs):\n\n if not self.id:\n slug = slugify(self.name)\n i = 2\n while Ingredient.objects.filter(slug=slug):\n slug = '{slug}-{i}'\n i += 1\n self.slug = slug\n self.name = capwords(self.name)\n return super(Ingredient, self).save(*args, **kwargs)" ]
[ "0.6371879", "0.6166366", "0.5936699", "0.58009845", "0.5732777", "0.55733705", "0.55266577", "0.5525588", "0.5469047", "0.53936213", "0.538625", "0.53846484", "0.5360333", "0.5352547", "0.534155", "0.5337662", "0.5337662", "0.53374225", "0.5337322", "0.53343934", "0.533107", "0.5324481", "0.5324028", "0.5318831", "0.5313168", "0.5292753", "0.5286904", "0.5282209", "0.52726066", "0.52645755", "0.52383083", "0.5213912", "0.5201932", "0.51933134", "0.51919085", "0.51747984", "0.51605964", "0.5158243", "0.5145647", "0.5099833", "0.5092676", "0.50900453", "0.5063737", "0.5053441", "0.50509953", "0.50480044", "0.5043085", "0.50357085", "0.5027963", "0.5027963", "0.5027963", "0.5020235", "0.5018653", "0.5000563", "0.4997005", "0.49865565", "0.49821934", "0.4977128", "0.49738756", "0.49700177", "0.49637693", "0.49583706", "0.4950388", "0.4950388", "0.49390107", "0.49306765", "0.4923954", "0.49144298", "0.49076173", "0.49015096", "0.49011883", "0.4895377", "0.48928332", "0.48681748", "0.48581648", "0.48570094", "0.48478907", "0.48410448", "0.48241493", "0.48167485", "0.48158497", "0.48154142", "0.48144287", "0.48077163", "0.48021728", "0.4795386", "0.47942314", "0.4789389", "0.4787557", "0.47863057", "0.4783189", "0.4783189", "0.4770785", "0.47650376", "0.47615263", "0.47596154", "0.47498345", "0.47440398", "0.47438017", "0.47432336" ]
0.797927
0
Given an album slug, fetch the album entity
def get_album_by_slug(slug):

    album_key = get_album_key(slug)
    album = album_key.get()
    return album
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_album(album_id):\n return query_single(album_id, Album, album_schema)", "def album(self, q, page=None):\r\n return self.get('album', q, page)", "def album(self, uri, detail=None):\r\n extras = self.ALBUM_DETAIL.get(detail)\r\n return self.get(uri, extras)", "def get_album(self):\n return self._album", "def _get_album_or_image(json, imgur):\n if json['is_album']:\n return Gallery_album(json, imgur, has_fetched=False)\n return Gallery_image(json, imgur)", "def get_album_key(slug):\n err = 'Series slug must be defined and of of type basestring'\n\n if not slug or not isinstance(slug, basestring):\n raise RuntimeError(err)\n\n return ndb.Key(PHOTOALBUM_KIND, slug)", "def get_album(self, object_id, relation=None, **kwargs):\n return self.get_object(\"album\", object_id, relation=relation, **kwargs)", "def get_album(self) -> Optional[str]:\n return self.album", "def album(self, album_id, **kwargs):\n _id = self._get_album_id(album_id)\n # pylint: disable=no-member\n return self._get(API.ALBUM.value.format(id=_id), **kwargs)", "def get_album_by_id(self, album_id):\n self.app.curs.execute('select * from album where alid=%s', (album_id,))\n if self.app.curs.rowcount == 1:\n return self.app.curs.fetchone()\n else: # pragma: no cover\n return None", "def get_albums(entity_url: str) -> list:\n entity_url = entity_url.rstrip(\"/\")\n response = requests.get(entity_url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n albums = []\n for link in soup.find_all('a'):\n url = link.get('href')\n if url is not None and \"/album/\" in url:\n if url.startswith(\"http\"):\n albums.append(url)\n else:\n albums.append(f\"{entity_url}{url}\")\n return albums", "async def get_album(self, album_id: int) -> APIReturn:\n return await self._request(\"GET\", \"/getAlbum\", extra_query={\"id\": album_id})", "def get_random_album(self):\n lib = self.ctrl.library\n artist, album = lib.get_random_album()\n return self.resp_from_data({\n \"artist\": artist,\n \"album\": album,\n \"path\": lib.get_path(artist, album)\n })", "def search_for_album(album_name):\n\n print(f'Searching for album: {album_name}')\n\n search_result = spotifyObject.search(q=f'\"{album_name}\"', limit=20, type='album')\n\n items = search_result['albums']['items']\n\n results = []\n\n for item in items:\n if len(item['artists']) > 1:\n artists = tuple(art['name'] for art in item['artists'])\n else:\n artists = item['artists'][0]['name']\n\n results.append((artists, item['name'], item['id']))\n\n return results", "def get_album_list():\n\n # TODO: Paginate this, etc\n entities = PhotoAlbum.query().order(-PhotoAlbum.title).fetch(1000)\n\n return entities", "def get_albums_by_artist(self, artist_id):\n return self.__get('album', artist_id)", "def album(self):\n return self.getItunesAttribute('Album')", "def get_album(self, id):\n url = \"https://api.imgur.com/3/album/{0}\".format(id)\n json = self._send_request(url)\n return Album(json, self)", "def get_album_cover(self):\n artist = self.get_request_arg(\"artist\")\n album = self.get_request_arg(\"album\")\n if not (album and artist):\n return self.resp_from_data(\n {\"message\": \"Please specify a valid artist and album\"}, 403)\n else:\n cover = self.ctrl.library.get_cover_path(artist, album)\n return self.resp_from_image(cover)", "def album(self, album_id):\n if not isinstance(album_id, int):\n return \"the id should be an integer\"\n x = requests.get(\n f\"{Endpoints.base_url}album.get?apikey={self.api_key}&album_id={album_id}\"\n )\n if 
x.json()[\"message\"][\"header\"][\"status_code\"] == 401:\n return \"Invalid API key\"\n if x.json()[\"message\"][\"header\"][\"status_code\"] == 402:\n return (\n \"The usage limit has been reached, either you exceeded per day requests limits or your balance is \"\n \"insufficient. \"\n )\n if x.json()[\"message\"][\"header\"][\"status_code\"] == 403:\n return \"You are not authorized to perform this operation.\"\n if x.json()[\"message\"][\"header\"][\"status_code\"] == 404:\n return f\"No album with given ID:{album_id} found\"\n return x.json()", "def get_albums(self, offset=None):\n return self.__get('albums')", "async def search_album(album_name):\n async with aiohttp.ClientSession() as session:\n async with session.get('https://bandcamp.com/api/fuzzysearch/1/autocomplete?q=' + album_name) as resp:\n response = await resp.json()\n\n results = response.get('auto', {}).get('results', [])\n results = [res for res in results if res.get('type') == 'a']\n if not results:\n raise NotFound\n result = results[0]\n async with session.get(result.get('url', 'https://bandcamp.com/')) as resp:\n response = await resp.text()\n try:\n result['release_date'] = response.split('album_release_date: \"')[-1].split('\",')[0].split(':')[0]\n except:\n result['release_date'] = '01 Jan 1970 00'\n result['track_list'] = [getattr(aa.find('span'), 'text', '') for aa in bs4.BeautifulSoup(response, 'html.parser').find('table', {'class':'track_list'}).find_all('tr')]\n\n return BandcampAlbum(result)", "def get_albums():\n return query_multiple(request.args, album_search, \\\n album_filter, Album, albums_schema)", "def find_by_name(our_data,name):\n for album in our_data:\n if album['album'] == name:\n return album\n return None", "def get_album_url(self) -> Optional[str]:\n return self.album_url", "async def search_song(album_name):\n async with aiohttp.ClientSession() as session:\n async with session.get('https://bandcamp.com/api/fuzzysearch/1/autocomplete?q=' + album_name) as resp:\n response = await resp.json()\n\n results = response.get('auto', {}).get('results', [])\n results = [res for res in results if res.get('type') == 't']\n if not results:\n raise NotFound\n result = results[0]\n async with session.get(result.get('url', 'https://bandcamp.com/')) as resp:\n response = await resp.text()\n try:\n result['release_date'] = response.split('album_release_date: \"')[-1].split('\",')[0].split(':')[0]\n except:\n result['release_date'] = '01 Jan 1970 00'\n result['TrackAlbum'] = bs4.BeautifulSoup(response, 'html.parser').find('span', itemprop='inAlbum').text.strip()\n\n return BandcampSong(result)", "def get_album(self, album_id, only_active=True):\n options = {\n 'album_id': int(album_id),\n 'only_active': int(bool(only_active)),\n }\n return self._get('get_album', options)", "def fetch(cls, slug):\n try:\n article = Article.objects.get(slug=slug)\n except Article.DoesNotExist:\n raise exceptions.NotFound(f'Article of slug {slug} nonexistent')\n else:\n return article", "def fetch(cls, slug):\n try:\n article = Article.objects.get(slug=slug)\n except Article.DoesNotExist:\n raise exceptions.NotFound(f'Article with slug {slug} nonexistent')\n else:\n return article", "def get_albums(self):\n artist = self.get_request_arg(\"artist\")\n if artist:\n lib = self.ctrl.library\n lst = sorted(self.ctrl.library.get_albums(artist))\n albums = [{\"artist\": artist,\n \"album\": album,\n \"path\": lib.get_path(artist, album)} for album in lst]\n if lst:\n return self.resp_from_data(albums)\n return self.resp_from_data(\n 
{\"message\": f\"No album found for artist={artist}\"}, 400)", "def getAlbum(self, albumID):\n currAPIVersion = self.config['apiVersion']\n currAPIURL = URL_NEAPIS[sys._getframe().f_code.co_name]\n currAPIURL = currAPIURL[min(currAPIVersion, len(currAPIURL) - 1)]\n currDict = {\n }\n #modify in arguments\n currC, currR = self._mySubmit(currAPIURL, currDict, albumID)\n self.apiLog.info(\"%s Json Loads Begin\", sys._getframe().f_code.co_name)\n currR = json.loads(currR)\n self.apiLog.info(\"%s Json Loads End\", sys._getframe().f_code.co_name)\n self.updateCookie(currC)\n self.checkCode(currR['code'])\n\n return currR, currAPIURL[2]", "def delete_album_by_slug(slug): \n album = get_album_by_slug(slug)\n [x.delete() for x in ContributorAlbum.scan({\"slug\": condition.EQ(album.slug)})]\n album.delete()", "def get_artwork(session_, uri: str):\n return session_.query(CoverArt).filter_by(uri=uri).first()", "def media_album_name(self):\n return self._state.get(\"album\", None)", "def get_gallery_album(self, id):\n url = \"https://api.imgur.com/3/gallery/album/{0}\".format(id)\n resp = self._send_request(url)\n return Gallery_album(resp, self)", "def by_slug(cls: t.Type[Article], slug: str, db: Session) -> t.Optional[Article]:\n q = db.query(cls)\n q = q.filter(cls.slug == slug)\n return q.one_or_none()", "def _get_mb_album(albumname, **kwa):\n url = \"http://musicbrainz.org/ws/2/release/\"\n qargs = dict(\n release='\"%s\"' % albumname,\n primarytype=kwa.get(\"primarytype\", \"album\"),\n status=kwa.get(\"status\", \"official\"))\n qargs.update({k: '\"%s\"' % v for k, v in kwa.items()})\n qargs = [\"%s:%s\" % item for item in qargs.items()]\n qargs = {\"query\": \" AND \".join(qargs)}\n g.message = \"Album search for '%s%s%s'\" % (c.y, albumname, c.w)\n wdata = _do_query(url, qargs)\n\n if not wdata:\n return None\n\n ns = {'mb': 'http://musicbrainz.org/ns/mmd-2.0#'}\n root = ET.fromstring(wdata)\n rlist = root.find(\"mb:release-list\", namespaces=ns)\n\n if int(rlist.get('count')) == 0:\n return None\n\n album = rlist.find(\"mb:release\", namespaces=ns)\n artist = album.find(\"./mb:artist-credit/mb:name-credit/mb:artist\",\n namespaces=ns).find(\"mb:name\", namespaces=ns).text\n title = album.find(\"mb:title\", namespaces=ns).text\n aid = album.get('id')\n return dict(artist=artist, title=title, aid=aid)", "def get_albums_alpha(session_):\n artists = session_.query(Album).order_by(Album.title.asc()).all()\n return artists", "def getAlbums():\n\n r = requests.get(ALBUMS_URL, headers=HEADER, timeout=5)\n\n if r.status_code == 200:\n \n try:\n albums = [] \n soup = BeautifulSoup(r.text, \"html.parser\")\n album = soup.find_all(\"div\", class_=\"duv\")\n for i,al in enumerate(album): \n temp = {}\n temp['link'] = al.find_all(\"a\")[0]['href']\n temp['album'] = al.find_all(\"span\", class_=\"title\")[0].text\n albums.append(temp)\n\n if len(albums) > 0:\n return albums\n else:\n print(\"No albums found on site2!\")\n sys.exit(0)\n \n except Exception as e:\n print(\"Failed to get albums from site2\\n\", e)\n sys.exit(0)\n\n else:\n print(\"Albums Url fetch failed! 
Status code: {}\".format(r.status_code))\n sys.exit(0)", "def get(cls, name, artist=None, year=None):\n qry = cls.query().filter(FileRecord.album == name)\n\n if artist:\n qry = qry.filter(FileRecord.artist == artist)\n\n if year:\n qry = qry.filter(FileRecord.year == year)\n result = qry.first()\n\n if result and len(result) > 0:\n return cls(*result)\n else:\n return None", "def get_album_art(self, album_id):\n if not album_id:\n return None\n values = {\n 'action' : 'album',\n 'filter' : album_id,\n }\n\n root = self.__call_api(values)\n if not root:\n return None\n album = root.getElementsByTagName('album')[0]\n album_art = album.getElementsByTagName('art')[0].childNodes[0].data\n return album_art", "def get_album_cover(artist, album):\n # If we're allowed to use the cover art module\n if cover_art_module:\n cover_path = cover_art.get_cover(artist, album, os.path.expanduser(COVERS_FOLDER))\n # If it is valid, return it\n if cover_path:\n return cover_path\n # Otherwise, use a simplified approach and don't download new art\n else:\n # Just get what's available\n cover_path = cover_exists(artist, album, os.path.expanduser(COVERS_FOLDER))[0]\n if cover_path:\n return cover_path\n # If an image doesn't exist, return an icon\n return \"sonata\"", "def resolve_entity(root, info, slug: str) -> Optional[Entity]:\n try:\n return Entity.objects.get(slug=slug)\n except ObjectDoesNotExist:\n if slug.isnumeric():\n try:\n return Entity.objects.get(pk=int(slug))\n except GraphQLError:\n pass\n return None", "def get_album(album_id, albumtype):\n result = {'titel': '',\n 'artist': '',\n # 'artistid': '',\n 'artist_name': '',\n 'details': [('Label/jaar:', ''),\n ('Produced by:', ''),\n ('Credits:', ''),\n ('Bezetting:', ''),\n ('Tevens met:', '')],\n 'tracks': {},\n 'opnames': []}\n if album_id:\n album = dmla.list_album_details(album_id)\n result['titel'] = album.name\n result['artist'] = album.artist\n # result['artistid'] = album.artist.id\n result['artist_name'] = album.artist.get_name()\n text = album.label\n if album.release_year:\n if text:\n text += ', '\n text += str(album.release_year)\n result['details'] = [('Label/jaar:', text),\n ('Produced by:', album.produced_by),\n ('Credits:', album.credits),\n ('Bezetting:', album.bezetting),\n ('Tevens met:', album.additional)]\n if album:\n result['tracks'] = {x.volgnr: (x.name, x.written_by, x.credits)\n for x in dmla.list_tracks(album_id)}\n result['opnames'] = [(x.type, x.oms) for x in\n dmla.list_recordings(album_id)]\n if albumtype == 'live':\n result['details'].pop(0)\n return result", "def song_album(ans):\r\n albums = simple_album_list()\r\n for album in albums:\r\n songs = simple_songs_list(album)\r\n for song in songs:\r\n if ans == song:\r\n return album", "def cmd_album_id(client, args):\n album = client.get_album(args.album_id)\n data = album.__dict__\n generate_output({'album': data})", "def album_assignment(self):\n log.debug(\"Called album_assignment for %s.\" % self.name)\n self.success = False\n for splitter in splitters:\n if splitter in self.name:\n self.artist, self.album = self.name.split(splitter, 1) # May need to look at this again, can be more than 1!\n self.success = True\n break\n if self.success:\n results = self.sp.search(q='artist: ' + self.artist + 'album: ' + self.album, type='album', limit=1)\n if results['albums']['total'] >= 1:\n for items in results['albums']['items']:\n self.album = items['name']\n self.album_uri = items['uri']\n for artist in items['artists'][0]:\n self.artist = artist['name']\n self.artist_uri 
= artist['uri']\n else:\n self.success = False", "def get_by_slug(self, slug):\n return self.get(slug=slug)", "def test_retrieve_album(self, api_client, test_user):\n\n sample_photo_album(user=test_user)\n sample_photo_album(user=test_user, title=\"2019\")\n\n api_client.force_authenticate(test_user)\n res = api_client.get(PHOTO_ALBUM_URL)\n\n albums = PhotoAlbum.objects.all().order_by('-created')\n serializer = PhotoAlbumSerializer(albums, many=True)\n assert res.status_code == status.HTTP_200_OK\n assert res.data == serializer.data", "def get_albums(username):\n cur = mysql.connection.cursor()\n cur.execute(\"SELECT * FROM album WHERE username = '{0}'\".format(username))\n return cur.fetchall()", "def album_art(self):\n return self._album_art_url", "def get_albums(self):\n self.artist = self.artists_list.currentText()\n self.c_albums = [x['album'] for x in dmlc.list_albums(self.artist)\n if [x['album'] in self.albums_map[self.artist]]]\n self.albums_list.clear()\n self.albums_list.addItems(self.c_albums)\n self.update_navigation_buttons()", "def search_albums(self, needle):\n return self._album_search.search(searchable(needle))", "def test_get_album_id_various_album(self):\n album_id = self.add_album(artist='Various', album='Album')\n self.assertNotEqual(album_id, 0)\n track = Track(artist='Artist', album='Album', title='Title')\n track_album_id = self.app.set_album_id(track)\n self.assertEqual(track_album_id, album_id)\n self.assertEqual(track.album_id, album_id)", "def test_get_songs_by_album(self, track_elms, service_config, request):\n album_id = uuid.UUID(avalon.compat.to_uuid_input('f83fdec7-510f-44a5-87dc-61832669a582'))\n service_config.track_store.get_by_album.return_value = track_elms\n service_config.id_cache.get_album_id.return_value = album_id\n request.args['album'] = 'Album'\n params = avalon.web.request.Parameters(request)\n\n service = avalon.web.services.AvalonMetadataService(service_config)\n results = service.get_songs(params)\n\n assert results == track_elms, 'Expected matching tracks returned'\n service_config.track_store.get_by_album.assert_called_with(album_id)", "def test_ms_album_search(helpers):\n item_from_xml, item_from_dict = common_tests(\n MSAlbum,\n MS_ALBUM_SEARCH_XML,\n MS_ALBUM_SEARCH_DICT,\n \"00020064albumsearch:de unge\",\n helpers,\n )\n getter_attributes_test(\n \"artist\", item_from_xml, item_from_dict, MS_ALBUM_SEARCH_DICT.get(\"artist\")\n )\n getter_attributes_test(\n \"uri\", item_from_xml, item_from_dict, MS_ALBUM_SEARCH_DICT[\"uri\"]\n )", "def find_album_playlist(data):\n\n return data['album'].lower() + '.m3u'", "def media_album_name(self):\n media_status = self._media_status()[0]\n return media_status.album_name if media_status else None", "def test_get_album_id_regular_album(self):\n album_id = self.add_album(artist='Artist', album='Album')\n self.assertNotEqual(album_id, 0)\n track = Track(artist='Artist', album='Album', title='Title')\n track_album_id = self.app.set_album_id(track)\n self.assertEqual(track_album_id, album_id)\n self.assertEqual(track.album_id, album_id)", "def albums(self, albums, **kwargs):\n album_list = map(self._get_album_id, albums)\n return self._get(API.ALBUMS.value, ids=\",\".join(album_list), **kwargs)", "def media_album_name(self):\n return self._media_album", "def get_albums(self):\n return AlbumView.get_by_artist(self.name)", "def by_slug(slug, fail=True):\n q = DBSession.query(Feed).filter_by(slug=slug)\n if fail:\n return q.one()\n else:\n return q.first()", "def media_album_name(self):\n return 
self.coordinator.data.nowplaying[self.zone.SourceID].CurrSong.Album", "def media_album_artist(self):\n media_status = self._media_status()[0]\n return media_status.album_artist if media_status else None", "def get(request, slug):\n calbums = [x.json for x in get_album_contributors(get_album_by_slug(slug))]\n \n response = render_to_response(\n \"data/list.json\",\n {\"data\": calbums},\n content_type=\"application/json\",\n )\n response['Cache-Control'] = 'no-cache'\n return response", "def get_albums(self, ctx, page, templ_vars):\n if 'type' in page.meta and page.meta['type'] == 'index':\n album_pages = sorted(\n templ_vars['site']['categories']['gallery'],\n key=lambda album: album['datetime'],\n )\n albums = {}\n for album_page in album_pages:\n image_list = []\n images = map(\n lambda i: i['thumb_src'],\n self.albums[album_page['slug']]\n )\n image_list += images[:PREVIEW_IMGS_NUM]\n albums[album_page['slug']] = image_list\n templ_vars['site']['albums'] = albums", "def test_get_songs_by_album_id(self, track_elms, service_config, request):\n album_id = uuid.UUID(avalon.compat.to_uuid_input('37cac253-2bca-4a3a-be9f-2ac655e04ad8'))\n service_config.track_store.get_by_album.return_value = track_elms\n request.args['album_id'] = six.text_type(album_id)\n params = avalon.web.request.Parameters(request)\n\n service = avalon.web.services.AvalonMetadataService(service_config)\n results = service.get_songs(params)\n\n assert results == track_elms, 'Expected matching tracks returned'\n service_config.track_store.get_by_album.assert_called_with(album_id)", "def get_albums_by_artist(albumtype, search_for, sort_on):\n return list(dmla.list_albums_by_artist(albumtype, search_for, sort_on))", "def fetch_entity(endpoint, values):\n values['entity'] = Entity.objects.get_or_404(name=values['entity'])", "def getBySlug( self, person_slug ):\n qry = \"\"\"SELECT * FROM `%s`.`people` WHERE `slug` = \"%s\"; \"\"\" % ( self.db_name, Mysql.escape_string( person_slug ) )\n person = Mysql.ex( qry )\n return person[0]", "def get_album_art_url(html):\n\treturn re.findall('img src=\"(.*?)\" width=\"500\"', html)[0]", "def album_track(self, album_id, f_has_lyrics=1, page=1, page_size=10):\n if not isinstance(album_id, int):\n return \"the id should be an integer\"\n x = requests.get(\n f\"{Endpoints.base_url}album.tracks.get?apikey={self.api_key}&album_id={album_id}&f_has_lyrics={f_has_lyrics}&page={page}&page_size={page_size}\"\n )\n if x.json()[\"message\"][\"header\"][\"status_code\"] == 401:\n return \"Invalid API key\"\n if x.json()[\"message\"][\"header\"][\"status_code\"] == 402:\n return (\n \"The usage limit has been reached, either you exceeded per day requests limits or your balance is \"\n \"insufficient. 
\"\n )\n if x.json()[\"message\"][\"header\"][\"status_code\"] == 403:\n return \"You are not authorized to perform this operation.\"\n if x.json()[\"message\"][\"header\"][\"status_code\"] == 404:\n return f\"No album with given ID:{album_id} found\"\n return x.json()", "def test_album(app):\n with app.app_context():\n blob = create_record(CDS_ALBUM)\n model = matcher(blob, 'cds_dojson.marc21.models')\n\n assert model == marc21\n\n data = model.do(blob)\n assert data['physical_medium'][1][\n 'material_base_and_configuration'] == ('Neg NB 6 x 6', )\n assert data['images'][3]['$ref'] == 'http://cds.cern.ch/record/1782448'\n assert data['images'][3]['relation'] == 'Cover'\n assert data['imprint'][0]['_complete_date'] == 'Sep 1970'\n assert data['imprint'][0]['complete_date'] == '1970-09-01'\n assert data['place_of_photo'] == [\n {'place': 'CERN PS', 'requester': 'PHILIPPS'}]\n\n # Check that no fields are missing their model\n assert model.missing(blob) == []", "def get_article(self, slug):\n\t\tarticle = Blog.objects.get(slug=slug)\n\t\treturn article", "def get_songs_by_album(self, album_id):\n return self.__get('song', album_id)", "def get_top_albums(\n self, period: Period, limit: int = 50, page: int = 1\n ) -> ListModel[Album]:\n assert isinstance(period, Period)\n\n return self.retrieve(\n bind=Album,\n flatten=\"album\",\n params=dict(\n method=\"user.getTopAlbums\",\n user=self.name,\n limit=limit,\n page=page,\n period=period.value,\n ),\n )", "def test_get_first_page(db_session):\n query_params = {\n \"sort\": \"album_id\"\n }\n album_resource = AlbumResource(session=db_session)\n parser = ModelQueryParamParser(query_params)\n offset_limit_info = parser.parse_offset_limit(page_max_size=30)\n offset = offset_limit_info.offset\n limit = offset_limit_info.limit\n result = album_resource.get_collection(\n filters=parser.parse_filters(album_resource.model),\n sorts=parser.parse_sorts(),\n limit=limit,\n offset=offset\n )\n assert len(result) == 30\n assert result[0][\"album_id\"] == 1", "def getAlbumsFromArtist(artistLink):\n artistLink = str(artistLink)\n url = \"http://www.ohhla.com/\"+artistLink\n if artistLink[0:4]==\"http:\":\n url = artistLink\n try:\n html = urllib.request.urlopen(url).read()\n soup = bs4.BeautifulSoup(html, 'html.parser')\n table = soup.findAll(\"a\")[5:]\n albumLinks = []\n for entry in table:\n text = str(re.findall(\"\\\".*\\\"\", str(entry)))\n text = re.sub(\"[\\]\\['\\\"]\", \"\", text)\n link = url + str(text)\n if len(re.findall(\"(?:http)\",link)) == 1:\n albumLinks.append(link)\n except:\n return []\n return albumLinks", "def test_get_all_unassociated_single_track_with_album(self):\n track = Track(artist='Artist', album='Album', title='Title')\n track.insert(self.app.db, self.app.curs,\n 'xmms', datetime.datetime.now())\n self.assertEqual(self.get_track_count(), 1)\n tracks = Track.get_all_unassociated(self.app.curs)\n self.assertEqual(len(tracks), 1)\n self.assertEqual(tracks[0].artist, 'Artist')\n self.assertEqual(tracks[0].title, 'Title')\n self.assertEqual(tracks[0].album_id, 0)", "def test_get_album_id_none_found(self):\n track = Track(artist='Artist', album='Album', title='Title')\n album_id = self.app.set_album_id(track)\n self.assertEqual(album_id, 0)\n self.assertEqual(track.album_id, 0)", "def handle_cover_art_add(session_, uri, album: Album, flush=False):\n existing = get_artwork(session_, uri)\n if not existing:\n existing = CoverArt(uri=uri, album_id=album.id, album=album)\n session_.add(existing)\n if flush:\n session_.flush()\n return 
existing", "def make_album(artist_name, album_title): \n music_album = {\n 'Artist': artist_name.title(),\n 'Album': album_title.title()\n }\n return music_album", "def find_song_uri(self, song):\n\n try:\n tracks = self.search_song(song[\"name\"], album=song[\"album\"], artist=song[\"artist\"])\n except SongNotFoundError:\n try:\n tracks = self.search_song(song[\"name\"], artist=song[\"artist\"])\n except SongNotFoundError:\n tracks = self.search_song(song[\"name\"])\n\n result = tracks[0]\n uri = result[\"uri\"]\n return uri", "def fetchAlbumInfo(album_id):\n url = 'https://api.spotify.com/v1/albums/' + album_id\n req = requests.get(url)\n\n data = req.json() \n\n if not req.ok:\n print \"error : \" + data['error']['message']\n return {}\n\n\n #create a new dictionary\n album_info_dict = {}\n #keys for the dictionary\n album_info_dict['artist_id'] = data['artists'][0]['id']\n album_info_dict['album_id'] = album_id\n album_info_dict['name'] = data['name']\n album_info_dict['year'] = data['release_date'][0:4]\n album_info_dict['popularity'] = int(data['popularity']) #Spotify's popularity-meter, an integer\n\n return album_info_dict", "def media_album_name(self):\n return self._media_album_name", "def set_album(self, album: str) -> None:\n self.album = album", "def make_album(artist, title):\n album_dict = {\n 'artist': artist.title(),\n 'title': title.title(),\n }\n return album_dict", "def get_album_artist(self) -> Optional[str]:\n return self.album_artist", "def test_03_portfolio_get_specific_instance_by_slug(self):\n self.assertEqual(Portfolio.get_portfolio_by_slug(\"test\"), Portfolio.get_all()[0],\n msg=\"Portfolio is NOT returning a valid instance for a specific slug\")\n print(\"Portfolio get_portfolio_by_slug method is returning the following instance: {}\".format(\n Portfolio.get_portfolio_by_slug(\"test\"),\n ))", "def get_albums(self, limit=None):\n url = (\"https://api.imgur.com/3/account/{0}/albums/{1}\".format(self.name,\n '{}'))\n resp = self._imgur._send_request(url, limit=limit)\n return [Album(alb, self._imgur, False) for alb in resp]", "def get_track(session_, uri) -> Track:\n return session_.query(Track).filter_by(uri=uri).first()", "def test_get_albums_query_param(self, id_name_elms, service_config, request):\n service_config.search.search_albums.return_value = id_name_elms\n request.args['query'] = 'Dummy'\n params = avalon.web.request.Parameters(request)\n\n service = avalon.web.services.AvalonMetadataService(service_config)\n results = service.get_albums(params)\n\n assert results == id_name_elms, 'Expected matching albums returned'", "def get_artist(artist_id):\n return query_single(artist_id, Artist, artist_schema)", "def get_entity(endpoint):\n _entity, _id = parser_endpoint(endpoint)\n\n return _entity", "def get_blurb(album_url: str) -> str:\n response = requests.get(album_url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n meta = soup.find(\"meta\", content=True)\n blurb = html.unescape(meta.get(\"content\"))\n return blurb", "def get_by_artist(cls, artist):\n results = cls.query().filter(FileRecord.artist == artist).order_by(\n FileRecord.year).all()\n albums = []\n if results and len(results) > 0:\n for result in results:\n albums.append(cls(*result))\n\n return albums\n\n return albums", "def get_url_from_album_name(browser, name: str) -> str:\n album_name = name.split(\"-\")[1].strip()\n artist_name = name.split(\"-\")[0].strip()\n artist_url = get_url_from_artist_name(browser, artist_name)\n\n logger.debug(\"Searching for %s at %s\", album_name, 
artist_url)\n browser.get_url(artist_url)\n soup = browser.get_soup()\n artist_album_list = [\n [x.text.strip(), \"https://rateyourmusic.com\" + x.find(\"a\")[\"href\"]]\n for x in soup.find_all(\"div\", {\"class\": \"disco_mainline\"})\n ]\n artist_album_url = [x[1] for x in artist_album_list]\n artist_album_name = [x[0] for x in artist_album_list]\n\n url_match = artist_album_url[\n artist_album_name.index(\n get_close_matches_icase(album_name, artist_album_name)[0]\n )\n ]\n logger.debug(\"Best match : %s\", url_match)\n return url_match", "def get_item_by_slug(item_slug):\n return session.query(Items).filter_by(slug=item_slug).first()", "def get_article(uuid):\n return Article.get(Article.uuid == uuid)" ]
[ "0.75652915", "0.70452404", "0.67959744", "0.65954405", "0.65209013", "0.64783317", "0.6462776", "0.6453332", "0.643563", "0.64184994", "0.62658083", "0.6256562", "0.6231396", "0.6121484", "0.6022898", "0.60085446", "0.59636325", "0.5959914", "0.59453434", "0.592426", "0.5915406", "0.58989745", "0.5883402", "0.58545595", "0.57640034", "0.5754611", "0.57439524", "0.57378596", "0.5736686", "0.5736287", "0.5705906", "0.5658346", "0.5654577", "0.56172764", "0.56099796", "0.55999136", "0.55918187", "0.5572819", "0.5567803", "0.5559026", "0.55587727", "0.55457014", "0.5536974", "0.55225086", "0.5517811", "0.5514247", "0.5493458", "0.5456094", "0.5440897", "0.5398218", "0.5387969", "0.53662235", "0.5314496", "0.53041357", "0.5298282", "0.5290247", "0.5283593", "0.5280346", "0.5275494", "0.5264904", "0.52555823", "0.52487814", "0.52362657", "0.52319044", "0.52253664", "0.52083474", "0.52050525", "0.52045804", "0.52018857", "0.52010155", "0.5198278", "0.51822966", "0.5170915", "0.5155267", "0.51549995", "0.5154975", "0.514888", "0.5116584", "0.5110301", "0.5097841", "0.5095303", "0.507884", "0.5064138", "0.50506836", "0.50473607", "0.5042809", "0.50371933", "0.5033143", "0.50284165", "0.49927184", "0.49842152", "0.4978515", "0.49702615", "0.49694493", "0.49423352", "0.49396855", "0.49364084", "0.49309993", "0.49276188", "0.49207768" ]
0.80606824
0
Fetch a list of Albums
def get_album_list():

    # TODO: Paginate this, etc
    entities = PhotoAlbum.query().order(-PhotoAlbum.title).fetch(1000)

    return entities
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_albums():\n return query_multiple(request.args, album_search, \\\n album_filter, Album, albums_schema)", "def get_albums(self):\n artist = self.get_request_arg(\"artist\")\n if artist:\n lib = self.ctrl.library\n lst = sorted(self.ctrl.library.get_albums(artist))\n albums = [{\"artist\": artist,\n \"album\": album,\n \"path\": lib.get_path(artist, album)} for album in lst]\n if lst:\n return self.resp_from_data(albums)\n return self.resp_from_data(\n {\"message\": f\"No album found for artist={artist}\"}, 400)", "def albums(self, albums, **kwargs):\n album_list = map(self._get_album_id, albums)\n return self._get(API.ALBUMS.value, ids=\",\".join(album_list), **kwargs)", "def get_albums(entity_url: str) -> list:\n entity_url = entity_url.rstrip(\"/\")\n response = requests.get(entity_url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n albums = []\n for link in soup.find_all('a'):\n url = link.get('href')\n if url is not None and \"/album/\" in url:\n if url.startswith(\"http\"):\n albums.append(url)\n else:\n albums.append(f\"{entity_url}{url}\")\n return albums", "def GetAlbums(self, start=0, end=0, sortmethod='label', sortorder='ascending', filter='', artistid=None):\n self.logger.debug(\"Loading all albums for ARTISTID \" + str(artistid))\n try:\n xbmc = Server(self.url('/jsonrpc', True))\n sort = {'order': sortorder, 'method': sortmethod, 'ignorearticle': True}\n properties=['artist', 'title', 'year', 'description', 'thumbnail']\n limits = {'start': int(start), 'end': int(end)}\n if artistid is not None:\n filter = {'artistid': int(artistid)}\n else:\n filter = {'or': [\n {'field': 'album', 'operator': 'contains', 'value': filter},\n {'field': 'artist', 'operator': 'contains', 'value': filter}\n ]}\n return xbmc.AudioLibrary.GetAlbums(properties=properties, limits=limits, sort=sort, filter=filter)\n except ValueError:\n return", "def get_albums(self):\n self.artist = self.artists_list.currentText()\n self.c_albums = [x['album'] for x in dmlc.list_albums(self.artist)\n if [x['album'] in self.albums_map[self.artist]]]\n self.albums_list.clear()\n self.albums_list.addItems(self.c_albums)\n self.update_navigation_buttons()", "def simple_album_list():\r\n album_list = []\r\n data = dbase()\r\n for album in data.keys():\r\n album_list += [album]\r\n return album_list", "def get_albums_alpha(session_):\n artists = session_.query(Album).order_by(Album.title.asc()).all()\n return artists", "def get_albums(self, limit=None):\n url = (\"https://api.imgur.com/3/account/{0}/albums/{1}\".format(self.name,\n '{}'))\n resp = self._imgur._send_request(url, limit=limit)\n return [Album(alb, self._imgur, False) for alb in resp]", "def get_albums(self, offset=None):\n return self.__get('albums')", "def get_albums(username):\n cur = mysql.connection.cursor()\n cur.execute(\"SELECT * FROM album WHERE username = '{0}'\".format(username))\n return cur.fetchall()", "def fetchAlbumIds(artist_id):\n url = 'https://api.spotify.com/v1/artists/' + artist_id + '/albums?market=US&album_type=album'\n req = requests.get(url)\n\n data = req.json()\n\n #checking for bad return value\n if not req.ok:\n print \"error : \" + data['error']['message']\n return \"error : \" + data['error']['message']\n\n albums = []\n for item in data['items']:\n \talbums.append(item['id'])\n\n return albums", "def album_list(self):\n\n artist_id = self.addon_args[\"artist_id\"][0]\n\n xbmcplugin.setContent(self.addon_handle, \"albums\")\n\n for album in self.connection.walk_artist(artist_id):\n self.add_album(album)\n\n 
xbmcplugin.addSortMethod(\n self.addon_handle, xbmcplugin.SORT_METHOD_UNSORTED)\n xbmcplugin.addSortMethod(\n self.addon_handle, xbmcplugin.SORT_METHOD_ALBUM)\n xbmcplugin.addSortMethod(\n self.addon_handle, xbmcplugin.SORT_METHOD_ARTIST)\n xbmcplugin.addSortMethod(\n self.addon_handle, xbmcplugin.SORT_METHOD_VIDEO_YEAR)\n\n xbmcplugin.endOfDirectory(self.addon_handle)", "def getAlbums():\n\n r = requests.get(ALBUMS_URL, headers=HEADER, timeout=5)\n\n if r.status_code == 200:\n \n try:\n albums = [] \n soup = BeautifulSoup(r.text, \"html.parser\")\n album = soup.find_all(\"div\", class_=\"duv\")\n for i,al in enumerate(album): \n temp = {}\n temp['link'] = al.find_all(\"a\")[0]['href']\n temp['album'] = al.find_all(\"span\", class_=\"title\")[0].text\n albums.append(temp)\n\n if len(albums) > 0:\n return albums\n else:\n print(\"No albums found on site2!\")\n sys.exit(0)\n \n except Exception as e:\n print(\"Failed to get albums from site2\\n\", e)\n sys.exit(0)\n\n else:\n print(\"Albums Url fetch failed! Status code: {}\".format(r.status_code))\n sys.exit(0)", "def getAlbums(owner_id=None, album_ids=None, offset=None, count=None, need_system=None,\\\n need_covers=None, photo_sizes=None):\n params = {\n 'owner_id': owner_id,\n 'album_ids': album_ids,\n 'offset': offset,\n 'count': count,\n 'need_system': need_system,\n 'need_covers': need_covers,\n 'photo_sizes': photo_sizes\n }\n result = call('photos.getAlbums', **params)\n return parse_response(result)", "def search_for_album(album_name):\n\n print(f'Searching for album: {album_name}')\n\n search_result = spotifyObject.search(q=f'\"{album_name}\"', limit=20, type='album')\n\n items = search_result['albums']['items']\n\n results = []\n\n for item in items:\n if len(item['artists']) > 1:\n artists = tuple(art['name'] for art in item['artists'])\n else:\n artists = item['artists'][0]['name']\n\n results.append((artists, item['name'], item['id']))\n\n return results", "def get_albums(self):\n return AlbumView.get_by_artist(self.name)", "def get_albums_by_artist(albumtype, search_for, sort_on):\n return list(dmla.list_albums_by_artist(albumtype, search_for, sort_on))", "def cmd_account_albums(client, args):\n account_albums = client.get_account_albums(args.username, args.page)\n data = [item.__dict__ for item in account_albums]\n generate_output({'account_albums': data}, args.output_file)", "def get_album(album_id):\n return query_single(album_id, Album, album_schema)", "def read_artist_albums(id, name):\n list_a = [(x.name, str(x.release_year), str(x.id))\n for x in dmla.list_albums_by_artist('', id, 'Jaar')]\n list_c = [(x['album'], x['year']) for x in dmlc.list_albums(name)]\n return list_a, list_c", "def get_by_artist(cls, artist):\n results = cls.query().filter(FileRecord.artist == artist).order_by(\n FileRecord.year).all()\n albums = []\n if results and len(results) > 0:\n for result in results:\n albums.append(cls(*result))\n\n return albums\n\n return albums", "def get_top_albums(\n self, period: Period, limit: int = 50, page: int = 1\n ) -> ListModel[Album]:\n assert isinstance(period, Period)\n\n return self.retrieve(\n bind=Album,\n flatten=\"album\",\n params=dict(\n method=\"user.getTopAlbums\",\n user=self.name,\n limit=limit,\n page=page,\n period=period.value,\n ),\n )", "def get(request, slug):\n calbums = [x.json for x in get_album_contributors(get_album_by_slug(slug))]\n \n response = render_to_response(\n \"data/list.json\",\n {\"data\": calbums},\n content_type=\"application/json\",\n )\n response['Cache-Control'] = 
'no-cache'\n return response", "def albums(date, album_type, genre):\n urlhandle = f\"{amazon_charts_url}/albums\"\n params = {\n \"type\": album_type,\n \"date\": date,\n \"genre\": genre,\n }\n\n data = utilities.RequestData(urlhandle, params)\n return utilities.RequestGet(data)[\"data\"]", "def simple_songs_list(name_of_album):\r\n songs = []\r\n data1 = dbase()\r\n data1 = data1[name_of_album][0]\r\n for song in data1.keys():\r\n songs += [song]\r\n return songs", "def get_tracks_from_albums(sp, album_uri_list):\n\n track_list = [[\"track_name\", \"track_uri\", \"track_release_date\"]]\n\n print(\"Log: Pulling data from Spotify. This can take a while...\")\n\n for album_uri in album_uri_list:\n album_tracks = sp.album_tracks(album_uri, limit=50, offset=0)[\"items\"]\n count_tracks_in_album = len(album_tracks)\n album_release_date = sp.album(album_uri)[\"release_date\"]\n\n # This part is probably very slow and should be improved by accessing the API less often\n for track_number in range(count_tracks_in_album):\n track_name = album_tracks[track_number][\"name\"]\n track_uri = album_tracks[track_number][\"uri\"]\n \n track_list.append([track_name, track_uri, album_release_date])\n\n # Create df from list of tracks for all albums\n track_df = pd.DataFrame(data=track_list[1:], columns=track_list[0])\n \n print(\"Log: Finished pulling all tracks from albums.\")\n return track_df", "def tracked_albums():\n print('Your Google Photos Albums ([X] = tracked):')\n albums = get_albums(service)\n for i, a in enumerate(albums):\n check = 'X' if a.id in library.get_album_ids() else ' '\n print('[{}] {}. {}'.format(check, i+1, a.title))\n return albums", "def cmd_album_images(client, args):\n album_images = client.get_album_images(args.album_id)\n data = [item.__dict__ for item in album_images]\n generate_output({'album_images': data}, args.output_file)", "def get_albums(playlist_name):\n\n playlist_id = find_playlist(playlist_name)\n \n items = get_playlist_tracks(playlist_id=playlist_id)\n \n track_values = []\n \n for item in items:\n track = item['track']\n album = track['album']\n artists = tuple(artist['name'] for artist in album['artists'])\n \n track_values.append((album['name'], artists[0]))\n \n album_details = namedtuple('AlbumDetails', 'album artist')\n \n for tup in dict.fromkeys(track_values):\n yield album_details(*tup)", "def get_albums(self, only_published=True, only_active=True, list_only=False,\n only_smart=False, exclude_smart=False, tags=None, tags_exclusive=True):\n if not tags:\n tags = []\n\n if not hasattr(tags, '__iter__'):\n tags = [tags]\n\n options = {\n 'only_published': int(bool(only_published)),\n 'only_active': int(bool(only_active)),\n 'list_only': int(bool(list_only)),\n 'only_smart': int(bool(only_smart)),\n 'exclude_smart': int(bool(exclude_smart)),\n }\n\n if tags:\n options.update({'tags': ','.join(str(tag) for tag in tags),\n 'tags_exclusive': int(bool(tags_exclusive))})\n\n data = self._get('get_album_list', options)\n return data['albums']", "def album(self, q, page=None):\r\n return self.get('album', q, page)", "def albums_by_genre_list(self):\n\n genre = self.addon_args[\"foldername\"][0].decode(\"utf-8\")\n\n xbmcplugin.setContent(self.addon_handle, \"albums\")\n\n for album in self.connection.walk_album_list_genre(genre):\n self.add_album(album, show_artist=True)\n\n xbmcplugin.endOfDirectory(self.addon_handle)", "def GetRecentAlbums(self, limit=5):\n self.logger.debug(\"Fetching recently added Music\")\n try:\n xbmc = Server(self.url('/jsonrpc', True))\n 
properties = ['artist', 'albumlabel', 'year', 'description', 'thumbnail']\n limits = {'start': 0, 'end': int(limit)}\n return xbmc.AudioLibrary.GetRecentlyAddedAlbums(properties=properties, limits=limits)\n except:\n self.logger.error(\"Unable to fetch recently added Music!\")\n return", "def get_tracks(self):\n artist = self.get_request_arg(\"artist\")\n album = self.get_request_arg(\"album\")\n if not (album and artist):\n return self.resp_from_data(\n {\"message\": \"Please specify a valid artist and album\"}, 403)\n else:\n tracks = self.ctrl.library.get_tracks(artist, album)\n return self.resp_from_data(tracks)", "def get_albums_by_artist(self, artist_id):\n return self.__get('album', artist_id)", "def test_get_songs_by_album(self, track_elms, service_config, request):\n album_id = uuid.UUID(avalon.compat.to_uuid_input('f83fdec7-510f-44a5-87dc-61832669a582'))\n service_config.track_store.get_by_album.return_value = track_elms\n service_config.id_cache.get_album_id.return_value = album_id\n request.args['album'] = 'Album'\n params = avalon.web.request.Parameters(request)\n\n service = avalon.web.services.AvalonMetadataService(service_config)\n results = service.get_songs(params)\n\n assert results == track_elms, 'Expected matching tracks returned'\n service_config.track_store.get_by_album.assert_called_with(album_id)", "def test_retrieve_album(self, api_client, test_user):\n\n sample_photo_album(user=test_user)\n sample_photo_album(user=test_user, title=\"2019\")\n\n api_client.force_authenticate(test_user)\n res = api_client.get(PHOTO_ALBUM_URL)\n\n albums = PhotoAlbum.objects.all().order_by('-created')\n serializer = PhotoAlbumSerializer(albums, many=True)\n assert res.status_code == status.HTTP_200_OK\n assert res.data == serializer.data", "def album_list_for_user():\r\n answer = \"\"\r\n data = dbase()\r\n for album in data.keys():\r\n answer += album + \", \"\r\n return answer[:-2]", "def read_album_tracks(id, artist_name, album_name):\n list_a = [x.name for x in dmla.list_tracks(id)]\n list_c = [x['title'] for x in dmlc.list_tracks_for_album(artist_name, album_name)\n if x['track'] != -1]\n return list_a, list_c", "async def search_album(album_name):\n async with aiohttp.ClientSession() as session:\n async with session.get('https://bandcamp.com/api/fuzzysearch/1/autocomplete?q=' + album_name) as resp:\n response = await resp.json()\n\n results = response.get('auto', {}).get('results', [])\n results = [res for res in results if res.get('type') == 'a']\n if not results:\n raise NotFound\n result = results[0]\n async with session.get(result.get('url', 'https://bandcamp.com/')) as resp:\n response = await resp.text()\n try:\n result['release_date'] = response.split('album_release_date: \"')[-1].split('\",')[0].split(':')[0]\n except:\n result['release_date'] = '01 Jan 1970 00'\n result['track_list'] = [getattr(aa.find('span'), 'text', '') for aa in bs4.BeautifulSoup(response, 'html.parser').find('table', {'class':'track_list'}).find_all('tr')]\n\n return BandcampAlbum(result)", "def cmd_account_album_ids(client, args):\n account_album_ids = client.get_account_album_ids(args.username, args.page)\n generate_output({'account_album_ids': account_album_ids})", "def advAlbums(request,advId=None):\n if request.method == 'GET':\n adv = Adventure.objects.get(id=advId)\n albums = Album.objects.filter(adv=adv)\n albumSerializer = AlbumSerializer(albums, many=True)\n return JsonResponse(albumSerializer.data, safe=False)", "def test_get_resources_ordered(db_session):\n query_params = {\n 
\"sort\": \"-album_id,title\"\n }\n parser = ModelQueryParamParser(query_params)\n album_resource = AlbumResource(session=db_session)\n result = album_resource.get_collection(\n filters=parser.parse_filters(album_resource.model),\n sorts=parser.parse_sorts()\n )\n assert len(result) == 347\n assert result[0][\"album_id\"] == 347", "def get_songs_by_album(self, album_id):\n return self.__get('song', album_id)", "def get_albums(self, ctx, page, templ_vars):\n if 'type' in page.meta and page.meta['type'] == 'index':\n album_pages = sorted(\n templ_vars['site']['categories']['gallery'],\n key=lambda album: album['datetime'],\n )\n albums = {}\n for album_page in album_pages:\n image_list = []\n images = map(\n lambda i: i['thumb_src'],\n self.albums[album_page['slug']]\n )\n image_list += images[:PREVIEW_IMGS_NUM]\n albums[album_page['slug']] = image_list\n templ_vars['site']['albums'] = albums", "def current_user_saved_albums(self, limit=20, offset=0, **kwargs):\n return self._get(API.MY_ALBUMS.value, limit=limit, offset=offset, **kwargs)", "def get_photos(album_id):\n opts = RUN_OPTS.opts\n\n payload = {'albumId': album_id}\n try:\n resp = requests.get(PHOTOS_URL, params=payload,\n timeout=opts['timeout'])\n except (requests.ConnectionError, requests.ReadTimeout):\n print(f\"get for {PHOTOS_URL} errored or timed out after \" +\n f\"{opts['timeout']} seconds\")\n return None\n\n print_debug(f\"http status_code = {resp.status_code}; url = {resp.url}\")\n\n if not resp.ok:\n print(f'url for album_id={album_id} or \"{resp.url}\" not found: ' +\n f'status {resp.status_code}')\n return None\n\n photos = resp.json()\n if not resp.ok or len(photos) <= 0:\n print(f'zero rows returned for album_id={album_id}')\n return None\n\n return photos", "def get_albums_from_artists(sp, artist_uri_list):\n\n # Create header for output df\n albums_list = [[\"name\", \"album_uri\", \"album_release_date\", \"artist_uri\"]]\n\n print(\"Log: Pulling data from Spotify. 
This can take a while...\")\n\n # Loop through list of artist uris\n for artist_uri in artist_uri_list:\n # Get album from artist\n albums = sp.artist_albums(artist_uri)\n \n # Append each album to list\n for album in albums[\"items\"]:\n album_name = album[\"name\"]\n album_uri = album[\"uri\"]\n album_release_date = album[\"release_date\"]\n albums_list.append([album_name, album_uri, album_release_date, artist_uri])\n\n # Create df from list of albums for all artist\n albums_df = pd.DataFrame(data=albums_list[1:], columns=albums_list[0])\n\n print(\"Log: Finished pulling all albums from artist.\")\n return albums_df", "def album(self, album_id, **kwargs):\n _id = self._get_album_id(album_id)\n # pylint: disable=no-member\n return self._get(API.ALBUM.value.format(id=_id), **kwargs)", "def get_random_album(self):\n lib = self.ctrl.library\n artist, album = lib.get_random_album()\n return self.resp_from_data({\n \"artist\": artist,\n \"album\": album,\n \"path\": lib.get_path(artist, album)\n })", "def album_tracks(self, album_id, limit=50, offset=0, **kwargs):\n _id = self._get_album_id(album_id)\n # pylint: disable=no-member\n return self._get(\n API.ALBUM_TRACKS.value.format(id=_id), limit=limit, offset=offset, **kwargs\n )", "async def get_album(self, album_id: int) -> APIReturn:\n return await self._request(\"GET\", \"/getAlbum\", extra_query={\"id\": album_id})", "def test_get_all_need_transform_two_albums(self):\n album = Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=120)\n pk = album.insert(self.app.db, self.app.curs)\n album = Album(artist='Artist', album='Album 2',\n totaltracks=1, totalseconds=120)\n pk = album.insert(self.app.db, self.app.curs)\n self.assertEqual(self.get_album_count(), 2)\n\n albums = Album.get_all_need_transform(self.app.curs, 1)\n self.assertEqual(len(albums), 2)", "def test_top_albums(self):\n \n rss = AppleRSS()\n objs = rss.get_top_albums(limit=10)\n \n self.__test_artists('top_albums', objs)", "def artist_albums(\n self, artist_id, album_type=None, country=None, limit=20, offset=0, **kwargs\n ):\n _id = self._get_artist_id(artist_id)\n return self._get(\n API.ARTIST_ALBUMS.value.format(id=_id), # pylint: disable=no-member\n album_type=album_type,\n country=country,\n limit=limit,\n offset=offset,\n **kwargs,\n )", "def test_get_albums_no_params(self, id_name_elms, service_config, request):\n service_config.album_store.get_all.return_value = id_name_elms\n params = avalon.web.request.Parameters(request)\n\n service = avalon.web.services.AvalonMetadataService(service_config)\n results = service.get_albums(params)\n\n assert results == id_name_elms, 'Expected all albums returned'", "def GetArtists(self, start=0, end=0, sortmethod='artist', sortorder='ascending', filter=''):\n self.logger.debug(\"Fetching all artists in the music database\")\n try:\n xbmc = Server(self.url('/jsonrpc', True))\n sort = {'order': sortorder, 'method': sortmethod, 'ignorearticle': True}\n properties = ['thumbnail', 'fanart']\n limits = {'start': int(start), 'end': int(end)}\n filter = {'field': 'artist', 'operator': 'contains', 'value': filter}\n return xbmc.AudioLibrary.GetArtists(properties=properties, limits=limits, sort=sort, filter=filter)\n except ValueError:\n logger.error(\"Unable to fetch artists!\")\n return", "def selectSongs():\n\tsql =\"select songs.title, artist.name, album.name from songs, album, \" \\\n\t+ \"artist join songs_album on songs.id=songs_album.songs_id \" \\\n\t+ \"join songs_artist on songs.id=songs_artist.songs_id \" \\\n\t+ 
\"where album.id=songs_album.album_id \" \\\n\t+ \"and artist.id=songs_artist.artist_id\"\n\tc, conn = connect()\n\tretr = c.execute(sql)\n\tsongs = []\n\tfor entry in retr:\n\t\tsongs.append(music.song(title=entry[0], artist=entry[1], album=entry[2]))\n\treturn songs", "def get_labels():\n session = get_session()\n try:\n query = session.query(Album.label).distinct()\n final_query = query\n labels = query.all()\n label_list = []\n for label in labels:\n label_list.append(label[0])\n count = final_query.count()\n return jsonify({\n 'items': label_list,\n 'count': count\n })\n finally:\n session.close()", "def get_all_songs():\r\n return [Song.song_json(song) for song in Song.query.all()]", "def test_get_songs_by_album_id(self, track_elms, service_config, request):\n album_id = uuid.UUID(avalon.compat.to_uuid_input('37cac253-2bca-4a3a-be9f-2ac655e04ad8'))\n service_config.track_store.get_by_album.return_value = track_elms\n request.args['album_id'] = six.text_type(album_id)\n params = avalon.web.request.Parameters(request)\n\n service = avalon.web.services.AvalonMetadataService(service_config)\n results = service.get_songs(params)\n\n assert results == track_elms, 'Expected matching tracks returned'\n service_config.track_store.get_by_album.assert_called_with(album_id)", "def fetchAlbumInfo(album_id):\n url = 'https://api.spotify.com/v1/albums/' + album_id\n req = requests.get(url)\n\n data = req.json() \n\n if not req.ok:\n print \"error : \" + data['error']['message']\n return {}\n\n\n #create a new dictionary\n album_info_dict = {}\n #keys for the dictionary\n album_info_dict['artist_id'] = data['artists'][0]['id']\n album_info_dict['album_id'] = album_id\n album_info_dict['name'] = data['name']\n album_info_dict['year'] = data['release_date'][0:4]\n album_info_dict['popularity'] = int(data['popularity']) #Spotify's popularity-meter, an integer\n\n return album_info_dict", "def get_all_songs() -> Generator[dict, None, None]:\n\n logging.debug(\"Fetching from server\")\n\n api = _get_api()\n\n for song_page in api.get_all_songs(incremental=True):\n for song in song_page:\n yield song", "def get_album(self, id):\n url = \"https://api.imgur.com/3/album/{0}\".format(id)\n json = self._send_request(url)\n return Album(json, self)", "def songs_list(name_of_album):\r\n songs = \"\"\r\n data = dbase()\r\n data = data[name_of_album][0]\r\n for song in data.keys():\r\n songs += song\r\n songs += \", \"\r\n return songs[:-2]", "def get(owner_id=None, album_id=None, photo_ids=None, rev=None, extended=None,\\\n feed_type=None, feed=None, photo_sizes=None, offset=None, count=None):\n params = {\n 'owner_id': owner_id,\n 'album_id': album_id,\n 'photo_ids': photo_ids,\n 'rev': rev,\n 'extended': extended,\n 'feed_type': feed_type,\n 'feed': feed,\n 'photo_sizes': photo_sizes,\n 'offset': offset,\n 'count': count\n }\n result = call('photos.get', **params)\n return parse_response(result)", "def all_artists(our_data):\n return [album['artist'] for album in our_data]", "async def artists(self, ctx: BBContext):\n\n query = \"\"\"SELECT DISTINCT artist_name, COUNT(*)\n FROM extras.arts\n WHERE artist_name IS NOT NULL\n GROUP BY artist_name\n ORDER BY COUNT(*) DESC\"\"\"\n\n args = [query]\n\n con = await ctx.get_connection()\n data: List[asyncpg.Record] = await con.fetch(*args)\n view = ArtsLeaderboardPagination(data, ctx.author) # type: ignore (Direct messages intent is not being used so author can only be a member)\n await view.start(ctx.channel)", "def test_get_first_page(db_session):\n query_params = 
{\n \"sort\": \"album_id\"\n }\n album_resource = AlbumResource(session=db_session)\n parser = ModelQueryParamParser(query_params)\n offset_limit_info = parser.parse_offset_limit(page_max_size=30)\n offset = offset_limit_info.offset\n limit = offset_limit_info.limit\n result = album_resource.get_collection(\n filters=parser.parse_filters(album_resource.model),\n sorts=parser.parse_sorts(),\n limit=limit,\n offset=offset\n )\n assert len(result) == 30\n assert result[0][\"album_id\"] == 1", "def get(self):\n return PhotoGalleryService().get_all(), 200", "def get_album_songs(self, album_id):\n url = get_album_url(album_id)\n result = self.get_request(url)\n\n return result['album']['songs']", "def getAlbumsFromArtist(artistLink):\n artistLink = str(artistLink)\n url = \"http://www.ohhla.com/\"+artistLink\n if artistLink[0:4]==\"http:\":\n url = artistLink\n try:\n html = urllib.request.urlopen(url).read()\n soup = bs4.BeautifulSoup(html, 'html.parser')\n table = soup.findAll(\"a\")[5:]\n albumLinks = []\n for entry in table:\n text = str(re.findall(\"\\\".*\\\"\", str(entry)))\n text = re.sub(\"[\\]\\['\\\"]\", \"\", text)\n link = url + str(text)\n if len(re.findall(\"(?:http)\",link)) == 1:\n albumLinks.append(link)\n except:\n return []\n return albumLinks", "def get_artists():\n return query_multiple(request.args, artist_search, \\\n artist_filter, Artist, artists_schema)", "def all_titles(our_data):\n return [album['album'] for album in our_data]", "async def arts(self, ctx: BBContext, artist: Optional[discord.Member] = None):\n\n if artist:\n query = f'SELECT url, artist_name FROM {TABLE_ARTS} WHERE artist_id = $1 LIMIT 20'\n args = [query, artist.id]\n else:\n query = f'SELECT url, artist_name FROM {TABLE_ARTS} ORDER BY random() LIMIT 20'\n args = [query]\n\n con = await ctx.get_connection()\n data: List[asyncpg.Record] = await con.fetch(*args)\n\n view = ArtsPagination(data, ctx.author) # type: ignore (Direct messages intent is not being used so author can only be a member)\n await view.start(ctx.channel)", "def get_artists(self, limit: int = 50, page: int = 1) -> ListModel[Artist]:\n return self.retrieve(\n bind=Artist,\n flatten=\"artist\",\n params=dict(\n method=\"library.getArtists\",\n user=self.name,\n page=page,\n limit=limit,\n ),\n )", "def get_albums_from(artist_uri):\n album_uris = []\n results = spotify.artist_albums(artist_uri, album_type='album')\n albums = results['items']\n # get URIs for each album\n for album in albums:\n album_uris.append(album['uri'])\n\n return album_uris", "def fetch_photos(n):\n\n # This is the list we will use the pass back the photo information.\n data = []\n\n # First, we search for photos taken in Manchester.\n response = requests.get(f'https://api.flickr.com/services/rest/?method=flickr.photos.search&api_key={FLICKR_API_KEY}&lat=53.48&lon=-2.23&radius=10&radius_units=km&format=json&nojsoncallback=1')\n\n # Now loop through the photos.\n for photo in sample(response.json()['photos']['photo'], n):\n\n # We will search with the photo ID.\n id = photo['id']\n\n # Get the photo details. 
We can get the URL to the photo from here.\n response = requests.get(f'https://api.flickr.com/services/rest/?method=flickr.photos.getSizes&api_key={FLICKR_API_KEY}&photo_id={id}&format=json&nojsoncallback=1')\n\n # Extract the photo URL from the response.\n url = response.json()['sizes']['size'][-1]['source']\n\n # Store our photo ID and URL.\n data.append({\n 'title': photo['title'],\n 'id': photo['id'],\n 'url': url,\n })\n\n # Send back our list of photos.\n return data", "def album(self, uri, detail=None):\r\n extras = self.ALBUM_DETAIL.get(detail)\r\n return self.get(uri, extras)", "async def fetch_all_images(sess: Session = Depends(get_db)):\n image_list = utils_com.get_com_image_list(sess)\n return image_list", "def album_parser(data):\n album_ids = []\n for item in data['data']:\n album_ids.append(item['id'])\n return album_ids", "def get_album(self, object_id, relation=None, **kwargs):\n return self.get_object(\"album\", object_id, relation=relation, **kwargs)", "def get_all_musicians(self):\n self.cursor.execute(\"select * from musicians\")\n self.connection.commit()\n return self.cursor.fetchall()", "def get_album_tracks(self):\n track_list = self.soup.findAll('div', class_='chart_row')\n number_of_tracks = 0\n titles = []\n urls = []\n track_numbers = []\n \n for track in track_list:\n track_title = re.sub(' Lyrics', '', \" \".join(track.h3.text.split()))\n lyrics_url = track.a['href']\n track_number = track.span.span.text.strip()\n \n if track_number == '':\n # Sometimes there are additional urls that are not a song's lyrics. Skip these.\n continue\n else:\n track_number = int(track_number)\n \n number_of_tracks += 1\n titles.append(track_title)\n urls.append(lyrics_url)\n track_numbers.append(track_number)\n \n if self.song_order:\n # Check that order values are okay.\n for number in self.song_order:\n if number > number_of_tracks:\n raise SongOrderValueError(f'Track number given ({number}) exceeds number of tracks ({number_of_tracks})')\n \n for title, url, number in zip(titles, urls, track_numbers):\n if self.song_order:\n if number not in self.song_order:\n print(f'Skipping song: {number:02d} {title}')\n continue\n \n lyrics = self.get_single_lyrics(url)\n self.album.add_song(Song(title=title, track_number=number, lyrics=lyrics))\n\n self.album.number_of_tracks = number_of_tracks", "def fetch_list(self):\n\t\treturn self.fetch(self.list_url % ART_SERVER_HOST)", "def cmd_album_id(client, args):\n album = client.get_album(args.album_id)\n data = album.__dict__\n generate_output({'album': data})", "def print_album(albums):\n print(\"\\nPrinting album data:\")\n for album in albums:\n print(f\"Artist Name: {album['name'].title()}\")\n print(f\"\\tAlbum Name: {album['album'].title()}\")\n if (album['num_songs']):\n print(f\"\\tNumber Songs: {album['num_songs']}\")\n\n print(\"\")", "def get(self):\n mb = MusicbrainzClient()\n query = self.get_argument('q')\n artists, tracks = yield [mb.search_artists(query),\n mb.search_tracks(query)]\n data = {\n 'artists': [\n {\n 'id': artist['id'],\n 'artist': artist['name'],\n 'note': artist.get('disambiguation', '')\n }\n for artist in artists['artist-list']\n ],\n 'tracks': [\n {\n 'id': track['id'],\n 'title': track['title'],\n 'artist': track['artist-credit-phrase']\n }\n for track in tracks['recording-list']\n ]\n }\n self.finish(data)", "def fetchAll(self, *args, **kwargs):\n return KorAPClient.fetchAll(self, *args, **kwargs)", "def get_albums(self):\n if self.artist_list.count() == 0: # this happens when the panel is reshown\n 
return # after another panel was shown\n self.c_artist = self.artist_list.currentText()\n ## self.last_handled = self.artist_list.currentIndex()\n # remember first handled item for currency communication over panels\n self._parent.current_data = self.c_artist\n self.a_artist = self.artist_map[self.c_artist]\n a_albums, c_albums = read_artist_albums(self.a_artist, self.c_artist)\n for name, year, id, *rest in self.albums_to_save[self.c_artist]:\n a_albums.append((name, year, str(id)))\n self.clementine_albums.clear()\n for item, year in c_albums:\n new = qtw.QTreeWidgetItem([item])\n new.setData(0, core.Qt.UserRole, year)\n try:\n new.setText(1, str(self.albums_map[self.c_artist][item][1]))\n except KeyError:\n pass\n self.clementine_albums.addTopLevelItem(new)\n self.albums_albums.clear()\n self.lookup = collections.defaultdict(list)\n for item in a_albums:\n new = qtw.QTreeWidgetItem([x.replace('None', '') for x in item])\n self.albums_albums.addTopLevelItem(new)\n self.lookup[item[0]].append(item[2])\n self.tracks = collections.defaultdict(list)", "def read_all():\n # Create the list of photos from our data\n photos = Photo.query.order_by(Photo.sample_id).all()\n\n # Serialize the data for the response\n photo_schema = PhotoSchema(many=True)\n data = photo_schema.dump(photos)\n return data", "def get_galleries_by_album(self, album_id, exclude=None):\n if not exclude:\n exclude = []\n\n if not hasattr(exclude, '__iter__'):\n exclude = [exclude]\n\n options = {\n 'album_id': int(album_id),\n }\n\n if exclude:\n options.update({'exclude': ','.join(str(int(id)) for id in exclude)})\n\n data = self._get('get_associated_galleries', options)\n return data['galleries']", "def get_album(self):\n return self._album", "def getSongsFromAlbum(albumLink):\n albumLink = str(albumLink)\n try:\n html = urllib.request.urlopen(albumLink).read()\n soup = bs4.BeautifulSoup(html, 'html.parser')\n table = soup.findAll(\"a\")[5:]\n songLinks = []\n for entry in table:\n text = str(re.findall(\"\\\".*\\\"\", str(entry)))\n text = re.sub(\"[\\]\\['\\\"]\", \"\", text)\n link = albumLink + str(text)\n songLinks.append(link)\n except:\n return []\n return songLinks", "def get_list(self ):\n headers = { 'Authorization' : self.client.authorization_header }\n response = requests.get(\n self.client.url + '/media', \n headers = headers\n )\n\n return json.loads(response.text)", "def getAll(owner_id=None, extended=None, offset=None, count=None, photo_sizes=None,\\\n no_service_albums=None, need_hidden=None, skip_hidden=None):\n params = {\n 'owner_id': owner_id,\n 'extended': extended,\n 'offset': offset,\n 'count': count,\n 'photo_sizes': photo_sizes,\n 'no_service_albums': no_service_albums,\n 'need_hidden': need_hidden,\n 'skip_hidden': skip_hidden\n }\n result = call('photos.getAll', **params)\n return parse_response(result)", "def get_album_by_id(self, album_id):\n self.app.curs.execute('select * from album where alid=%s', (album_id,))\n if self.app.curs.rowcount == 1:\n return self.app.curs.fetchone()\n else: # pragma: no cover\n return None", "def album_tracks(\n self, album_id: str, market: str = None, limit: int = 20, offset: int = 0\n ) -> SimpleTrackPaging:\n return self._get(\n f\"albums/{album_id}/tracks\", market=market, limit=limit, offset=offset\n )", "def get_songs(self):\n search_object = {\"size\":25000,\n 'query': {'term': {FIELD_FINGERPRINTED: True}}, \"fields\": [FIELD_SONGNAME, FIELD_FILE_SHA1,\n FIELD_TOTAL_HASHES]}\n response = self.cursor.search(index = SONGS_INDEXNAME, body=search_object)\n 
#print(\"get_songs response: \",response)\n arr = []\n for hit in response[\"hits\"][\"hits\"]:\n dct = {\"song_name\":hit['_source'][FIELD_SONGNAME],\"total_hashes\":hit['_source'][FIELD_TOTAL_HASHES],\n \"file_sha1\":hit['_source'][FIELD_FILE_SHA1]}\n arr.append(dct)\n return arr" ]
[ "0.8165235", "0.77157474", "0.7463277", "0.7329539", "0.7285775", "0.7235554", "0.71896297", "0.7168777", "0.71157223", "0.7096026", "0.7090436", "0.70471865", "0.70387936", "0.6880528", "0.68339896", "0.6679008", "0.66074675", "0.6587003", "0.65148", "0.6513741", "0.64734447", "0.64520377", "0.6439934", "0.64054096", "0.6400348", "0.63549346", "0.6321143", "0.6306885", "0.62803304", "0.62781984", "0.62374395", "0.6207313", "0.61919713", "0.61676043", "0.6164787", "0.6157157", "0.612505", "0.6121308", "0.6111048", "0.610116", "0.61006993", "0.60935074", "0.60933846", "0.6087107", "0.6067963", "0.6067187", "0.6066392", "0.6040919", "0.6012546", "0.6010794", "0.6009354", "0.597638", "0.5974695", "0.5955281", "0.5950415", "0.5924534", "0.59135205", "0.589534", "0.5887297", "0.5878124", "0.5877288", "0.5863211", "0.5860691", "0.5855848", "0.5841772", "0.5834393", "0.58267075", "0.58212715", "0.5811448", "0.5807025", "0.58018607", "0.57993996", "0.57896626", "0.5788432", "0.57617605", "0.5759363", "0.57567257", "0.572648", "0.5717516", "0.5709885", "0.57080877", "0.570769", "0.5695202", "0.56906694", "0.56829804", "0.5663964", "0.5648849", "0.5635454", "0.56341124", "0.5619868", "0.5616774", "0.56131375", "0.5612388", "0.5604314", "0.55998373", "0.5597443", "0.55721223", "0.5571814", "0.55550283", "0.5547191" ]
0.81510186
1
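The negative snippets above repeatedly pull albums and their tracks through the Spotify Web API via spotipy. A condensed, hedged sketch of that recurring pattern is given below; it assumes an already-authenticated spotipy.Spotify client named sp, and the helper name tracks_for_artist is illustrative rather than taken from any single snippet in the record.

import spotipy

def tracks_for_artist(sp: spotipy.Spotify, artist_uri: str):
    # Walk each album of the artist and collect (track name, track uri, release date),
    # mirroring the get_albums_from_artists / get_tracks_from_albums snippets above.
    rows = []
    for album in sp.artist_albums(artist_uri, album_type="album")["items"]:
        release_date = sp.album(album["uri"])["release_date"]
        for track in sp.album_tracks(album["uri"], limit=50, offset=0)["items"]:
            rows.append((track["name"], track["uri"], release_date))
    return rows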
Attempts a Redis DB connection and returns the DB Object
def dbConnect(self):
    r = redis.StrictRedis()
    try:
        r = redis.from_url(os.environ.get("REDIS_URL"))
        print("DB Connection seems okay!")
    except Exception as error:
        print("Oops! An exception has occurred:", error)
        print("Exception TYPE:", type(error))
        r = None
    finally:
        return r
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connect_to_db(self):\n r = redis.Redis(host=self.hostname,\n port=self.portnumber,\n password=self.password)\n try:\n r.ping()\n except redis.ConnectionError:\n sys.exit('ConnectionError: is the redis-server running?')\n self.r = r", "def _conn_redis(self) -> Redis:\n return Redis(host=self._REDIS_DB_HOST, port=self._REDIS_DB_PORT, db=0,decode_responses=True)", "def _connect(self):\n try:\n rcon = redis.StrictRedis(self._host, self._port, self._db)\n # Return the connection only if is valid and reachable\n if not rcon.ping():\n return None\n except (redis.ConnectionError, redis.RedisError) as exc:\n LOG.error(\"Failed to connect to Redis Server: %s\", exc)\n return None\n\n return rcon", "def connect_to_redis():\n return Redis(host=redis_host, port=redis_port, db=0)", "def setup(self):\n\t\ttry:\n\t\t\tdatabase = redis.StrictRedis(host=self.HOST, port=self.PORT, db=self.DB)\n\n\t\t\tself.logger.info(\"Successfully established Redis connection.\")\n\n\t\t\treturn database\n\n\t\texcept redis.exceptions.ConnectionError as err:\n\t\t\traise err", "def get_database(redis_host, redis_port, redis_pass):\n return redis.StrictRedis(host=redis_host, port=redis_port,\n password=redis_pass)", "def connect_db():\n conexion = redis.StrictRedis(host='127.0.0.1', port= 6379, decode_responses=True, charset='utf-8')\n if (conexion.ping()):\n print (\"conectado al servidor de redis\")\n else:\n print(\"error...\")\n return conexion", "def _get_conn(self):\n return redis.Redis(connection_pool=self.pool)", "def conn_redis(host, port, db=0):\r\n r = redis.Redis(host=host, port=port, db=db)\r\n return r", "def get_connection(self, params):\r\n return Redis(connection_pool=self.get_or_create_connection_pool(params))", "def _connect(self):\n try: \n self.r = redis.StrictRedis(host=self.host, port=self.port, db=self.db)\n except:\n raise", "def get_redis() -> redis.Redis:\n global redis_conn\n if not redis_conn:\n host = app.config.get(\"REDIS_HOST\", \"127.0.0.1\")\n port = app.config.get(\"REDIS_PORT\", \"6379\")\n db = app.config.get(\"REDIS_DB\", \"0\")\n redis_conn = redis.Redis(host=host, port=port, db=db)\n\n return redis_conn", "def connection():\n global _connection\n if _connection is None:\n _connection = StrictRedis.from_url(REDIS_URL)\n return _connection", "def db(self):\n if self._db is None:\n self._db = redis.StrictRedis.from_url(self._uri)\n self.strict_release.register(self._db)\n return self._db", "def connect(self, **kwargs):\n\n self.__db = redis.Redis(**kwargs)\n try:\n self.__db.info()\n self.connected = True\n except redis.ConnectionError as e:\n self.logger.error(\"Failed to connect to Redis server: \", e)\n raise QueueNotConnectedError(e)\n\n return True", "def _get_db(reconnect=False):\n global _db, _connection\n identity = get_identity()\n # Connect if not already connected\n if _connection.get(identity) is None or reconnect:\n _connection[identity] = _get_connection(reconnect=reconnect)\n\n if _db.get(identity) is None or reconnect:\n # _db_name will be None if the user hasn't called connect()\n if _db_name is None:\n raise ConnectionError('Not connected to the database')\n\n # Get DB from current connection and authenticate if necessary\n _db[identity] = _connection[identity][_db_name]\n if _db_username and _db_password:\n _db[identity].authenticate(_db_username, _db_password)\n\n return _db[identity]", "def create_connection():\n # REDIS_URL is defined in .env and loaded into the environment by Honcho\n redis_url = os.getenv('REDIS_URL')\n # If it's not defined, use the Redis 
default\n if not redis_url:\n redis_url = 'redis://localhost:6379'\n urlparse.uses_netloc.append('redis')\n url = urlparse.urlparse(redis_url)\n return redis.StrictRedis(\n host=url.hostname,\n port=url.port,\n db=0,\n password=url.password\n )", "def conn(self):\n if self._sentinel:\n return self._sentinel.master_for(self._sentinel_name)\n if not self._conn:\n self._conn = self.__redis_mod.StrictRedis(\n host=self._host, port=self._port, **self._conn_kwargs\n )\n return self._conn", "def _connect(self):\n self.connection = RedisConnection(self.host, self.port, self.dbname)", "def get_client(conn):\n # No database indicates a cluster connection\n if not conn.get('db', None):\n conn.pop('db', None)\n return connect_redis_cluster(conn)\n\n # Otherwise it's a regular redis connection\n return connect_redis(conn)", "def get_redis_client(host='localhost', port=6379, db=0):\n host = os.environ.get('REDIS_HOST') or host\n port = os.environ.get('REDIS_PORT') or port\n return StrictRedis(host=host, port=port, db=db)", "def connect_redis(conn):\n # Don't pass empty password to the client\n if not conn.get('password', None):\n conn.pop('password', None)\n\n return redis.StrictRedis(**conn)", "def connect(self):\n if self.connection is not None:\n logger.info(\" connection: %s \" % (self.connection is not None))\n if not self.connection.opened():\n logger.info(\"connection is closed\")\n return self.reconect()\n\n if self.connection.opened():\n return self.connection\n try:\n self.connection = connect(**self.options)\n except Exception as e:\n logger.critical(\"Unable to connect to DB: {0}\".format(e.message))\n raise\n\n return self.connection", "def connect_redis(uri):\n puri = urlparse.urlparse(uri)\n host = puri.hostname\n port = puri.port\n password = puri.password if puri.password else ''\n db_name = puri.path.split('/')[1]\n r = redis.Redis(host=host, port=port, password=password, db=db_name)\n assert r.ping()\n return r", "def _get_connection(reconnect=False):\n global _connection\n identity = get_identity()\n # Connect to the database if not already connected\n if _connection.get(identity) is None or reconnect:\n try:\n _connection[identity] = Connection(**_connection_settings)\n except Exception, e:\n raise ConnectionError(\"Cannot connect to the database:\\n%s\" % e)\n return _connection[identity]", "def test_passing_connection(self):\n Pet.init_db(Redis(host=REDIS_HOST, port=REDIS_PORT))\n self.assertIsNotNone(Pet.redis)", "def get_db():\n if not hasattr(g, 'db_connection'):\n g.db_connection = connect_db()\n return g.db_connection", "def create_redis_connection(app=None):\n\n if app:\n app.logger.info('Instantiated new redis connection.')\n\n redis_connection = redis.StrictRedis(\n host=\"localhost\",\n port=6379,\n db=0\n )\n\n if not redis_connection.exists('last_queue_idx'):\n redis_connection.set('last_queue_idx', 0)\n\n return redis_connection", "def get_db(self):\n self.logger.info('in get_db()')\n try:\n return self.client[self.db_name]\n except Exception as e:\n self.logger.error(f'Error occurred while getting client {e}')", "def _connect(self):\r\n if not self._db:\r\n import boto\r\n sdb = boto.connect_sdb()\r\n if not self.domain_name:\r\n self.domain_name = boto.config.get(\"DB\", \"sequence_db\", boto.config.get(\"DB\", \"db_name\", \"default\"))\r\n try:\r\n self._db = sdb.get_domain(self.domain_name)\r\n except SDBResponseError, e:\r\n if e.status == 400:\r\n self._db = sdb.create_domain(self.domain_name)\r\n else:\r\n raise\r\n return self._db", "def 
get_connection(db_url=None):\n return engine(db_url).connect()", "def get_query_handler(self):\n import redis\n r = redis.Redis(host=self.hostname, port=self.port, db=self.db, socket_timeout=10)\n try:\n r.ping()\n return r\n except redis.exceptions.ConnectionError as r_con_error:\n self.logger.error('Redis connection error: ', r_con_error)", "def __connect(self):\n session, metadata, connection = db(dbhost=getattr(self, \"host\"),\n dbuser=getattr(self, \"user\"),\n dbpass=getattr(self, \"password\"),\n dbname=getattr(self, \"dbname\"))\n return session, metadata, connection", "async def connection():\n return await r.connect(db='main_db')", "def get_connection():\n\n return MongoClientManager().client.__getattr__(MONGODB_SETTINGS['db'])", "def get_redis():\n return redis.StrictRedis(host='redis', port=6379)", "def get_db():\n global _cached\n if not _cached:\n _cached = MongoClient(config.DB_URI).get_database()\n return _cached", "def get_db_connection(uri):\n client = pymongo.MongoClient(uri)\n return client.cryptongo", "def _get_cached_db_connection(name='ace'):\n if name is None:\n name = 'ace'\n\n config_section = 'database_{}'.format(name)\n\n if config_section not in saq.CONFIG:\n raise ValueError(\"invalid database {}\".format(name))\n\n try:\n db_identifier = _get_cached_db_identifier(name)\n with _global_db_cache_lock:\n logging.debug(\"aquiring existing cached database connection {}\".format(db_identifier))\n db = _global_db_cache[db_identifier]\n\n try:\n db.rollback()\n #logging.debug(\"acquired cached database connection to {}\".format(name))\n return db\n\n except Exception as e:\n logging.info(\"possibly lost cached connection to database {}: {} ({})\".format(name, e, type(e)))\n try:\n db.close()\n except Exception as e:\n logging.error(\"unable to close cached database connection to {}: {}\".format(name, e))\n\n try:\n with _global_db_cache_lock:\n del _global_db_cache[db_identifier]\n except Exception as e:\n logging.error(\"unable to delete cached db {}: {}\".format(db_identifier, e))\n\n #return _get_db_connection(name)\n\n except KeyError:\n pass\n\n try:\n logging.info(\"opening new cached database connection to {}\".format(name))\n\n with _global_db_cache_lock:\n _global_db_cache[db_identifier] = _get_db_connection(name)\n\n logging.debug(\"opened cached database connection {}\".format(db_identifier))\n return _global_db_cache[db_identifier]\n\n except Exception as e:\n logging.error(\"unable to connect to database {}: {}\".format(name, e))\n report_exception()\n raise e", "def connect_to_db(self):\n _config = self.config\n try:\n if self.logger is not None:\n self.logger.debug('Connecting to the database at {:s}:{:d}'.\n format(_config['database']['host'], _config['database']['port']))\n _client = pymongo.MongoClient(host=_config['database']['host'], port=_config['database']['port'])\n # grab main database:\n _db = _client[_config['database']['db']]\n\n except Exception as _e:\n if self.logger is not None:\n self.logger.error(_e)\n self.logger.error('Failed to connect to the database at {:s}:{:d}'.\n format(_config['database']['host'], _config['database']['port']))\n # raise error\n raise ConnectionRefusedError\n try:\n # authenticate\n _db.authenticate(_config['database']['user'], _config['database']['pwd'])\n if self.logger is not None:\n self.logger.debug('Successfully authenticated with the database at {:s}:{:d}'.\n format(_config['database']['host'], _config['database']['port']))\n except Exception as _e:\n if self.logger is not None:\n 
self.logger.error(_e)\n self.logger.error('Authentication failed for the database at {:s}:{:d}'.\n format(_config['database']['host'], _config['database']['port']))\n raise ConnectionRefusedError\n\n if self.logger is not None:\n self.logger.debug('Successfully connected to the database at {:s}:{:d}'.\n format(_config['database']['host'], _config['database']['port']))\n\n # (re)define self.db\n self.db = dict()\n self.db['client'] = _client\n self.db['db'] = _db", "def get_db():\n if ( g.get( 'db' ) is None ):\n g.db = connect_db()\n\n return g.db.connect()", "def __get_connection():\n # 根据配置文件创建连接池\n if not Mysql.__mysql_pool:\n Mysql.__mysql_pool = PooledDB(\n creator=MySQLdb,\n use_unicode=False,\n cursorclass=DictCursor,\n db=sqlconf.MysqlConfig['db'],\n host=sqlconf.MysqlConfig['host'],\n port=sqlconf.MysqlConfig['port'],\n user=sqlconf.MysqlConfig['user'],\n passwd=sqlconf.MysqlConfig['passwd'],\n charset=sqlconf.MysqlConfig['charset'],\n mincached=sqlconf.MysqlConfig['mincached'],\n maxcached=sqlconf.MysqlConfig['maxcached'],\n maxconnections=sqlconf.MysqlConfig['maxconnections'])\n # 返回连接池中连接对象\n return Mysql.__mysql_pool.connection()", "def _CreateRedisClient(self):\n try:\n redis_client = redis.from_url(self._REDIS_URL, socket_timeout=60)\n redis_client.ping()\n except redis.exceptions.ConnectionError:\n redis_client = fakeredis.FakeStrictRedis()\n\n return redis_client", "def _CreateRedisClient(self):\n try:\n redis_client = redis.from_url(self._REDIS_URL, socket_timeout=60)\n redis_client.ping()\n except redis.exceptions.ConnectionError:\n redis_client = fakeredis.FakeStrictRedis()\n\n return redis_client", "def get_redis_client():\n return redis.from_url(settings.REDIS_URI)", "def get_connection(self):\n if self.__connection is None:\n from pymongo import MongoClient\n from ir_config import IRConfig\n self.__connection = MongoClient(\n IRConfig.get_instance().get('db_host', self.__default_host), \n IRConfig.get_instance().get_int('db_port', self.__default_port))\n return self.__connection", "def get_database() -> Database:\n db_config = DatabaseConfig(DB_NAME)\n return connect_to_db(db_config)", "def _connect(self):\n conn = pymongo.MongoClient(self._config.get('mongodb', 'host'), self._config.getint('mongodb', 'port'))\n db = conn[self._config.get('mongodb', 'db')]\n return db[self._config.get('mongodb', 'collection')]", "def __init__(self, host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD):\n self.db = redis.StrictRedis(host=host, port=port, password=password, decode_responses=True)", "def connect(self):\n self.connection = redis.Redis(\n host=self.host,\n port=self.port,\n socket_connect_timeout=self.timeout,\n socket_timeout=self.timeout\n )", "def token_redis_connection():\n if not hasattr(current_app, 'auth0_redis_conn'):\n config = current_app.config.copy()\n config['REDIS_DB'] = config['AUTH0_REDIS_DB']\n # return everything as strings\n config['REDIS_DECODE_RESPONSES'] = True\n if config.get('USE_FAKE_REDIS', False):\n from fakeredis import FakeStrictRedis\n conn = FakeStrictRedis(decode_responses=True)\n else:\n conn = make_redis_connection(config)\n setattr(current_app, 'auth0_redis_conn', conn)\n return getattr(current_app, 'auth0_redis_conn')", "async def _get_db_connection():\n return await gino.Gino(get_database_dsn())", "def _unthreadsafe_get_connection(self):\n return PooledDBConnection(self, self._queue.get())", "async def database():\n db = await Database.connect_pool()\n return db", "def connect_to_db(_config):\n try:\n if 
_config['server']['environment'] == 'production':\n # in production, must set up replica set\n _client = pymongo.MongoClient(host=_config['database']['host'], port=_config['database']['port'],\n replicaset=_config['database']['replicaset'],\n readPreference='primaryPreferred')\n else:\n # standalone from my laptop, when there's only one instance of DB\n _client = pymongo.MongoClient(host=_config['database']['host'], port=_config['database']['port'])\n # grab main database:\n _db = _client[_config['database']['db']]\n except Exception as _e:\n raise ConnectionRefusedError\n try:\n # authenticate\n _db.authenticate(_config['database']['user'], _config['database']['pwd'])\n except Exception as _e:\n raise ConnectionRefusedError\n\n return _client, _db", "async def connect(self):\n self.client = await asyncio_redis.Connection.create(\n host=self.host,\n port=self.port,\n db=self.database,\n auto_reconnect=self.reconnect,\n password=self.password,\n )", "def run_redis_example():\n\n try:\n r = redis.StrictRedis(host=host, port=port, password=pw,\n decode_responses=True)\n except Exception as e:\n print(f'Error connecting to Redis DB: {e}')\n\n return r", "def get_conn(cls):\n\n if not cls.conn or not cls.conn.open:\n cls.connect()\n\n try:\n cls.conn.ping() # ping to test if the current conn is working\n except MySQLdb.OperationalError:\n cls.connect()\n\n return cls.conn", "def db( self ):\n #TODO: backoff\n if self._db is None:\n self._db = self._GetNewConnection()\n try:\n self._db.isolation_level\n except (OperationalError, InterfaceError, InternalError):\n l_logger.exception(\"Looks like the db is not responding. Trying to recover.\")\n try:\n self._db.close()\n except ProgrammingError:\n l_logger.info(\"Database is closed, attempting to recover\")\n self._db = self._GetNewConnection()\n return self._db", "def redis_client(self) -> Redis:\n if self._redis_client is None:\n redis_client = Redis(connection_pool=self.redis_conn_pool)\n\n self._redis_client = redis_client\n\n self._logger.debug(\n \"[%s]: Initialized Redis client: %s\", self.__name__, self._redis_client\n )\n\n return self._redis_client", "def connect(self):\n\t\ttry:\n\t\t\tdb = MySQLdb.connect(self.hostname, self.username, self.password, self.db)\n\t\texcept Exception as err:\n\t\t\traise(err)\n\t\treturn db", "def get_redis_client(self):\n\n client = Client(\n #connection_pool=connection_pool,\n host=self.backend_settings.get('HOST', 'localhost'),\n port=int(self.backend_settings.get('PORT', 6379)),\n io_loop=self.io_loop,\n password=self.backend_settings.get('PASSWORD', None),\n selected_db=int(self.backend_settings.get('DB', 0)),\n reconnect_callback=self.listen)\n\n return client", "def connect_db():\n return hc_db.HCDB(app.config[\"DATABASE\"])", "def _connect(self, **kwargs):\n global _connection\n if self.reuse and _connection:\n self.connection = _connection\n else:\n if pymongo.version_tuple[0] < 3:\n try:\n self.connection = Connection(host=self.host,\n port=self.port, **kwargs)\n # pymongo >= 3.0 does not raise this error\n except PyMongoError:\n if self.fail_silently:\n return\n else:\n raise\n else:\n self.connection = Connection(host=self.host, port=self.port,\n **kwargs)\n try:\n self.connection.is_locked\n except ServerSelectionTimeoutError:\n if self.fail_silently:\n return\n else:\n raise\n _connection = self.connection\n\n self.db = self.connection[self.database_name]\n if self.username is not None and self.password is not None:\n auth_db = self.connection[self.authentication_database_name]\n 
self.authenticated = auth_db.authenticate(self.username,\n self.password)\n\n if self.capped:\n #\n # We don't want to override the capped collection\n # (and it throws an error anyway)\n try:\n self.collection = Collection(self.db, self.collection_name,\n capped=True, max=self.capped_max,\n size=self.capped_size)\n except OperationFailure:\n # Capped collection exists, so get it.\n self.collection = self.db[self.collection_name]\n else:\n self.collection = self.db[self.collection_name]", "async def get(self):\n if self._connect_kwargs == None:\n raise IllegalAccessError(\"DB connection parameters not set yet\")\n\n if not hasattr(self._tl, \"conn\"):\n self._tl.conn = await r.connect(**self._connect_kwargs)\n\n return self._tl.conn", "def _threadsafe_get_connection(self):\n with self._lock:\n next_con = self._nextConnection\n con = PooledDBConnection(self, self._connections[next_con])\n next_con += 1\n if next_con >= len(self._connections):\n next_con = 0\n self._nextConnection = next_con\n return con", "def get(self):\n\n \n\n try:\n db = getDatabase()\n connection = db.connect()\n\n connection.get(self)\n\n except Exception as e:\n raise e\n finally:\n db.dispose()", "def get_rethink_connection(config):\n\n\trethink_conn = r.connect(\n\t\thost=config.get(\"RETHINKDB\", \"RETHINK_HOST\"),\n\t\tport=config.get(\"RETHINKDB\", \"RETHINK_PORT\"),\n\t\tdb=config.get(\"RETHINKDB\", \"RETHINK_DB\"),\n\t\tuser=config.get(\"RETHINKDB\", \"RETHINK_USER\"),\n\t\tpassword=config.get(\"RETHINKDB\", \"RETHINK_PASSWORD\"),\n\t\ttimeout=int(config.get(\"RETHINKDB\", \"RETHINK_TIMEOUT\")),\n\t)\n\treturn rethink_conn", "def get_rethink_connection_ex(config):\n\n\trethink_conn = r.connect(\n\t\thost=config.get(\"RETHINKDB\", \"RETHINK_HOST\"),\n\t\tport=config.get(\"RETHINKDB\", \"RETHINK_PORT\"),\n\t\tdb=config.get(\"RETHINKDB\", \"RETHINK_DB\"),\n\t\tuser=config.get(\"RETHINKDB\", \"RETHINK_USER\"),\n\t\tpassword=config.get(\"RETHINKDB\", \"RETHINK_PASSWORD\"),\n\t\ttimeout=int(config.get(\"RETHINKDB\", \"RETHINK_TIMEOUT\")),\n\t)\n\treturn rethink_conn", "def connect(self):\n if self.connection is not None:\n logger.info(\" connection: %s \" % (self.connection is not None))\n return self.connection\n try:\n self.connection = DataPostgres.connect(**self.options)\n except Exception as e:\n logger.critical(\"Unable to connect to DB: {0}\".format(e.message))\n raise\n\n return self.connection", "def get_connection(self):\n\n\t\treturn dbapi.connect(credentials.SERVER,\\\n\t\t\t\t\t\t\t credentials.PORT,\\\n\t\t\t\t\t\t\t credentials.USER,\\\n\t\t\t\t\t\t\t credentials.PASSWORD)", "def fake_db() -> Callable[[None], FakeRedis]:\n @lru_cache\n def wrapper() -> FakeRedis:\n db = FakeRedis(decode_responses=True)\n return db\n\n return wrapper", "def get_connection(self):\n\t\tfrom pymongo import MongoClient\n\n\t\tif self._connection is None:\n\t\t\tself._connection = MongoClient(host=self.url, max_pool_size=10)\n\n\t\treturn self._connection", "def get_connection(dsn):\n try:\n db_url = make_url(dsn)\n engine = create_engine(db_url)\n return engine.connect()\n except exc.OperationalError:\n raise RuntimeError(\"Database %s does not exist\" % db_url.database)", "def connect(db_name, host='localhost', port=27017, **kwargs):\n m_client = pymongo.MongoClient(host, port, **kwargs)\n try:\n db_instance = m_client.get_database(db_name)\n yield db_instance\n finally:\n m_client.close()", "def conn(self):\n try:\n if self._db is None:\n self._db = sqlc.connect(user=self.login,\n password=self.passwd,\n 
host=self.host,\n database=self.database)\n\n except sqlc.Error as e:\n print (\"MySQL exception #{0} getting connection: {1}\".format(e.errno, e.msg))\n if e.errno == 2003:\n exit(-1)\n except Exception as e:\n print (\"Couldn't get connection property: {0}\".format(e.message))\n finally:\n return self._db", "def get_db(db_config):\n hosts=[]\n db_uri=''\n\n for host in db_config['hosts']:\n hosts.append( host['host'] + \":\" + str(host['port'] ))\n\n db_uri = \"mongodb://\" + \\\n ','.join(hosts) + \\\n \"/?authSource=\" + db_config['auth_source'] + \\\n \"&replicaSet=\" + db_config['replica_set']\n\n\n db = MongoClient(\n db_uri,\n username = db_config['username'],\n password = db_config['password'],\n authMechanism = db_config['auth_mechanism'],\n ssl = (True if db_config['use_ssl'] else False),\n ssl_certfile = (db_config['ssl_certificate_file'] if db_config['ssl_certificate_file'] else None),\n ssl_ca_certs = (db_config['ssl_ca_file'] if db_config['ssl_ca_file'] else None),\n ssl_cert_reqs = (ssl.CERT_OPTIONAL if db_config['use_ssl'] else None),\n maxPoolSize = 5,\n wtimeout = 2500\n )[db_config['db_name']]\n \n return db", "def _get_db(self):\n return DB(\n ClientStorage.ClientStorage((self.server, self.port))\n )", "def _get_db_connection(name='ace'):\n\n if name is None:\n name = 'ace'\n\n #if _cached_db_connections_enabled():\n #return _get_cached_db_connection(name)\n\n config_section = 'ace'\n if name:\n config_section = 'database_{}'.format(name)\n\n if config_section not in saq.CONFIG:\n raise ValueError(\"invalid database {}\".format(name))\n\n _section = saq.CONFIG[config_section]\n kwargs = {\n 'db': _section['database'],\n 'user': _section['username'],\n 'passwd': _section['password'],\n 'charset': 'utf8'\n }\n\n if 'hostname' in _section:\n kwargs['host'] = _section['hostname']\n\n if 'port' in _section:\n kwargs['port'] = _section.getint('port')\n \n if 'unix_socket' in _section:\n kwargs['unix_socket'] = _section['unix_socket']\n\n if 'ssl_ca' in _section or 'ssl_key' in _section or 'ssl_cert' in _section:\n kwargs['ssl'] = {}\n\n if 'ssl_ca' in _section and _section['ssl_ca']:\n path = abs_path(_section['ssl_ca'])\n if not os.path.exists(path):\n logging.error(\"ssl_ca file {} does not exist (specified in {})\".format(path, config_section))\n else:\n kwargs['ssl']['ca'] = path\n\n if 'ssl_key' in _section and _section['ssl_key']:\n path = abs_path(_section['ssl_key'])\n if not os.path.exists(path):\n logging.error(\"ssl_key file {} does not exist (specified in {})\".format(path, config_section))\n else:\n kwargs['ssl']['key'] = path\n\n if 'ssl_cert' in _section and _section['ssl_cert']:\n path = _section['ssl_cert']\n if not os.path.exists(path):\n logging.error(\"ssl_cert file {} does not exist (specified in {})\".format(path, config_section))\n else:\n kwargs['ssl']['cert'] = path\n\n logging.debug(\"opening database connection {}\".format(name))\n return pymysql.connect(**kwargs)\n #return pymysql.connect(host=_section['hostname'] if 'hostname' in _section else None,\n #port=3306 if 'port' not in _section else _section.getint('port'),\n #unix_socket=_section['unix_socket'] if 'unix_socket' in _section else None,\n #db=_section['database'],\n #user=_section['username'],\n #passwd=_section['password'],\n #charset='utf8')", "def get_add_handler(self):\n import redis\n r = redis.Redis(host=self.hostname, port=self.port, db=self.db, socket_timeout=10)\n try:\n r.ping()\n return r\n except redis.exceptions.ConnectionError as r_con_error:\n self.logger.error('Redis 
connection error: ', r_con_error)", "def get(self, conn_alias: str) -> \"BaseDBAsyncClient\":\n storage: Dict[str, \"BaseDBAsyncClient\"] = self._get_storage()\n try:\n return storage[conn_alias]\n except KeyError:\n connection: BaseDBAsyncClient = self._create_connection(conn_alias)\n storage[conn_alias] = connection\n return connection", "def getConnection(self):\n if (not self.initialized):\n logging.error(\"Module is not initialized\")\n \n conn_options = {\n 'user': self.user,\n 'password' : self.password,\n 'host' : self.host,\n 'port' : self.port,\n 'database' : self.dbname,\n 'raise_on_warnings': True\n }\n db = mysql.connector.connect(**conn_options)\n return db", "def get_connection():\n con = psycopg2.connect(**DB_CONFIG)\n return con", "def get_connection(self, redis_prefix):\n return self.get_app().extensions['redis'][redis_prefix]", "def connect_server(self):\n redis_host = \"localhost\"\n redis_port = 6379\n redis_password = \"\"\n # step 3: create the Redis Connection object\n try:\n\n # The decode_repsonses flag here directs the client to convert the responses from Redis into Python strings\n # using the default encoding utf-8. This is client specific.\n self.r = redis.StrictRedis(host=redis_host, port=redis_port,\n password=redis_password, decode_responses=True)\n\n # step 4: Set the hello message in Redis\n self.r.set(\"msg:hello\", \"Hello World!!!\")\n\n # step 5: Retrieve the hello message from Redis\n msg = self.r.get(\"msg:hello\")\n print(msg)\n\n except Exception as e:\n print(e)", "def connect(db, username=None, password=None, **kwargs):\n global _connection_settings, _db_name, _db_username, _db_password, _db\n _connection_settings = dict(_connection_defaults, **kwargs)\n _db_name = db\n _db_username = username\n _db_password = password\n return _get_db(reconnect=True)", "def db(self):\n if isinstance(self._db, str):\n # Lazily load the database instance.\n self._db = self.client[self._db]\n\n if self._auth:\n # If authentication information has been provided, try to\n # use it.\n self._db.authenticate(**self._auth)\n\n return self._db", "def create_connection():\r\n try:\r\n conn = sq.connect(DBClass.db_name)\r\n except sq.Error as e:\r\n raise e\r\n \r\n return conn", "def __get_database_connection(self, reuse=True):\n if not self.__database_connection or not reuse:\n if self.__database_connection:\n self.__database_connection.close()\n self.__database_connection = None\n\n self.__database_connection = http.client.HTTPConnection(self.__DATABASE_HOST,\n port=self.__DATABASE_PORT,\n timeout=self.__TIMEOUT)\n\n return self.__database_connection", "def reconnect(self):\n try:\n self.redis = Redis(self.servers, self.port, self.db)\n except Exception, e:\n print e", "def get_db(_config):\n if not hasattr(flask.g, 'client'):\n flask.g.client, flask.g.db = connect_to_db(_config)\n return flask.g.client, flask.g.db", "def __init_db(self, db_name):\n\t\tclient = pymongo.MongoClient(self.__db_url)\n\t\treturn client[db_name]", "def connect_mongo_db():\n host = parsed_mongodb_url['host']\n port = parsed_mongodb_url['port']\n database = parsed_mongodb_url['database']\n username = parsed_mongodb_url['username']\n password = parsed_mongodb_url['password']\n\n connection = Connection(host, port)\n\n if username and password:\n connection[database].authenticate(username, password)\n\n return connection", "def get_connection(self):\n from pymongo.connection import Connection\n \n if self._connection is None:\n self._connection = Connection(self.host, self.port)\n return 
self._connection", "def _connect_mongo(host, port, db):\r\n conn = MongoClient(host, port)\r\n return conn[db]", "def getconn(self):\n #hdbport = int('3%s15' % Settings.hdbinstancenum)\n con = dbapi.connect(address = self.host, \\\n port = self.port, \\\n user = self.username, \\\n password = self.password, \\\n autocommit = True)\n if self.schema:\n cur = con.cursor()\n try:\n cur.execute('ALTER SESSION SET CURRENT_SCHEMA = %s' % self.schema)\n return con\n except dbapi.Error, err:\n cur.close()\n con.close()\n cur = None\n raise err\n finally:\n if cur:\n cur.close()\n else:\n return con", "def open_db_connection():\n client = MongoClient() #'104.131.185.191', 27017\n db = client[\"225VOH\"]\n return client, db", "def connect(self):\r\n\r\n db_config = read_db_config()\r\n\r\n try:\r\n print('Connecting to MySQL database...')\r\n conn = MySQLConnection(**db_config)\r\n\r\n if conn.is_connected():\r\n print('connection established.')\r\n return conn\r\n else:\r\n print('connection failed.')\r\n\r\n except Error as e:\r\n print(e)", "def redis_client(self) -> Redis:\n return self.app.key_value_store.redis_client", "def get_db():\n if not hasattr(g, 'mysql_db'):\n g.mysql_db = connect_db()\n return g.mysql_db" ]
[ "0.77405405", "0.77253574", "0.7679741", "0.7658954", "0.7619239", "0.74647945", "0.7445185", "0.74369836", "0.74027646", "0.73723024", "0.73014516", "0.723885", "0.7207661", "0.7056827", "0.70409024", "0.7014019", "0.6989931", "0.6985283", "0.69585145", "0.69442034", "0.6850017", "0.67983764", "0.6788899", "0.67850643", "0.6781833", "0.6743956", "0.67383206", "0.66456664", "0.6631926", "0.6625903", "0.6623683", "0.66202813", "0.66196805", "0.66195047", "0.6582529", "0.65812653", "0.65732944", "0.6551763", "0.6546224", "0.65453035", "0.6534349", "0.65149957", "0.65097815", "0.65097815", "0.6490844", "0.64858747", "0.6484836", "0.6481387", "0.64728796", "0.64695704", "0.6468307", "0.64614874", "0.6458996", "0.6446352", "0.6436173", "0.6433062", "0.64299333", "0.64280117", "0.6427436", "0.6425968", "0.6405557", "0.64032084", "0.6392655", "0.63887477", "0.6387146", "0.63696253", "0.6342806", "0.6332452", "0.63235885", "0.6321559", "0.6320307", "0.6311982", "0.6309838", "0.63068765", "0.63060546", "0.6294017", "0.6285675", "0.62786824", "0.625319", "0.62507737", "0.6245874", "0.62209034", "0.6218467", "0.6173587", "0.616901", "0.6164673", "0.61633813", "0.61597145", "0.6157506", "0.61412996", "0.61375916", "0.6132875", "0.6129911", "0.6124572", "0.6123842", "0.6122022", "0.6116024", "0.6112949", "0.61044216", "0.60992944" ]
0.8348194
0
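The record above answers the Redis-connection query with a helper that returns either a live client or None. A minimal usage sketch follows, assuming the redis-py package and a REDIS_URL environment variable; the db_connect name, the localhost fallback URL, and the ping() verification are illustrative additions grounded in the document and its negatives rather than part of the original row.

import os
import redis

def db_connect():
    # Open a connection from REDIS_URL (falling back to a local server) and
    # verify it with ping(), as several of the negative snippets above do.
    try:
        r = redis.from_url(os.environ.get("REDIS_URL", "redis://localhost:6379/0"))
        r.ping()  # raises redis.exceptions.ConnectionError if the server is unreachable
        return r
    except Exception as error:
        print("Redis connection failed:", type(error), error)
        return None

if __name__ == "__main__":
    client = db_connect()
    print("connected" if client else "no connection")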
Converts short URL to an ID
def shortURLToId(self, shortURL):
    id = 0
    for i in shortURL:
        val_i = ord(i)
        if(val_i >= ord('a') and val_i <= ord('z')):
            id = id*62 + val_i - ord('a')
        elif(val_i >= ord('A') and val_i <= ord('Z')):
            id = id*62 + val_i - ord('A') + 26
        else:
            id = id*62 + val_i - ord('0') + 52
    return id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _shortenUrl(self, url):\n posturi = \"https://www.googleapis.com/urlshortener/v1/url\"\n headers = {'Content-Type' : 'application/json'}\n data = {'longUrl' : url}\n data = json.dumps(data)\n request = urllib2.Request(posturi,data,headers)\n response = urllib2.urlopen(request)\n response_data = response.read()\n shorturi = json.loads(response_data)['id']\n return shorturi", "def short_url(lastid):\n number = lastid +100000000000\n bs62encoded = base62.encode(number)\n return 'https://abc.com/{id}'.format(id=str(bs62encoded))", "def get_id_shortlink(link = None):\n choppedLink = legacy_check(link)\n id = None\n try:\n id = choppedLink[3] # or -1 instead of 3\n except:\n pass #dont care bout issues here\n return id", "def decode(self, shortUrl):\n cleanedID = shortUrl[len(self.baseUrl)+len(self.prefix):]\n long_URL = self.storage[cleanedID]\n return long_URL", "def id_from_url(url):\n return url.split('-')[-1].split('.html')[0]", "def encode(shorturl_id: int) -> str:\n short_resource = []\n while shorturl_id > 0:\n character_index = shorturl_id % BASE\n short_resource.append(CHARACTER_SPACE[character_index])\n shorturl_id //= BASE\n return \"\".join(short_resource[::-1])", "def get_id(self, url):\n return url.split('/')[-1]", "def _short_id(video_id):\n return '-'.join(video_id.split('-')[0:2])", "def gen_shorter_url(long_url):\n if long_url in URL_PAIR_STORE.long_url:\n return URL_PAIR_STORE.short_url[\n URL_PAIR_STORE.long_url == long_url]\n else:\n short_url = DOMAIN_NAME + '/' + do_hashing(long_url)\n new_entry = URLPair(\n id=gen_unique_id(),\n long_url=long_url,\n short_url=short_url,\n )\n insert_new_pairs(new_entry)\n return short_url", "def long_to_short(self, url, url_mobile=None, url_tablet=None):\n\n temp_short = uuid4() #temporary short code so we can get lastworid after insert\n query = 'INSERT into urls(short,default_url,mobile_url,tablet_url) VALUES (\"{short}\",\"{url}\",\"{mobile}\",\"{tablet}\");'.\\\n format(short=temp_short, url=url,\n mobile=url_mobile, tablet=url_tablet)\n with sq.connect(self.DB) as conn:\n cursor = conn.cursor()\n try:\n cursor.execute(query)\n url_id = cursor.lastrowid + 1\n based_id = base36.encode(url_id)\n #Update to the definitive short url\n update_query = 'UPDATE urls SET short = \"{new_short}\" WHERE short = \"{temp_uuid}\";'.\\\n format(new_short=based_id, temp_uuid=temp_short)\n cursor.execute(update_query)\n return based_id\n except sq.OperationalError:\n print(\"ERROR\")\n return False\n except ValueError:\n return False", "def _id_from_url(url):\n url = re.sub(r'\\?.*', '', url)\n video_id = url.split('/')[-2]\n return video_id", "def decode(self, shortUrl: str) -> str:\n l = shortUrl\n \n tmp = l[-1]\n del l[-1]\n s=l[0]+\"//\"\n i = 2\n while i<len(l):\n s+=l[i]+\"/\"\n i+=1\n \n return s[:-1]", "def return_shorter_url(url):\n # found out that the entries were coming over in this format: <http://www.someurl.com>\n full_url = f\"https://www.googleapis.com/urlshortener/v1/url?key={API_KEY}\"\n fixed_url = remove_chars.clean_text(url)\n payload = {\"longUrl\": fixed_url}\n headers = {\"content-type\": \"application/json\"}\n # making a post to google API\n r = requests.post(full_url, data=json.dumps(payload), headers=headers).json()\n return f\"Short URL: {r['id']}\"", "def __create_short_url(self):\n last_short_url = Url.__load_last_short_url()\n short_url = self.__increment_string(last_short_url)\n Url.__save_last_short_url(short_url)\n return short_url", "def retrieve(short_id):\n try:\n url = Url.get(short_id)\n\n 
url.update(actions=[\n url.hits.set(url.hits + 1),\n url.lastHit.set(datetime.utcnow())\n ])\n\n return jsonify({\n \"statusCode\": 301,\n \"location\": url.longURL\n })\n\n except:\n return jsonify({\"Error\", \"No Such ID\"})", "def decode(self, shortUrl):\n v = shortUrl[20:len(shortUrl)]\n return (self.hash[int(v)])", "def short_id(self):\n if self.short_id_missing:\n return \"0\" * settings.ID_LENGTH\n return str(self.id)[0:settings.ID_LENGTH]", "def decode(self, shortUrl: str) -> str:\n url = shortUrl.split('/')[-1]\n idx = int(url)\n \n return self.reverse_map[idx]", "def get_row_id_for_short_url(url):\n try:\n return short_url.decode_url(url)\n except:\n return -1", "def id_from_url(url: str) -> str:\n parts = RedditBase._url_parts(url)\n try:\n comment_index = parts.index(\"comments\")\n except ValueError:\n raise InvalidURL(url) from None\n\n if len(parts) - 4 != comment_index:\n raise InvalidURL(url)\n return parts[-1]", "def encode(self, longUrl):\n shortUrl = \"http://tinyurl.com/\" + str(hash(longUrl))\n self.decode_map[shortUrl] = longUrl\n return shortUrl", "def decode(self, shortUrl):\n shortUrl = shortUrl[-6:]\n if shortUrl in self.short_to_long:\n return self.short_to_long[shortUrl]", "def extract_id(url):\n trail_id = url.replace('https://www.trailforks.com/trails/','').replace('/','')\n return trail_id", "def decode(short_url: str) -> int:\r\n result = 0\r\n for c in short_url:\r\n result = BASE * result + CODEX.find(c)\r\n return result", "def shorten_id(id):\n if id.startswith('CN'):\n id = id[2:]\n if not id[-1].isdigit():\n id = id[:-1]\n return id", "def _e_to_id(self, e):\n return (e.attrib['href']\n [(e.attrib['href']\n .rfind('/id')+3):]\n .replace('?mt=2', ''))", "def decode(self, shortUrl: str) -> str:\n return self.lookup[shortUrl]", "def encode(self, longUrl: str) -> str:\n ans = \"http://tinyurl.com/\" + hex(abs(hash(longUrl)))\n self.lookup[ans] = longUrl\n return ans", "def get_clean_url(url, unique_id):\n search = f\"(.*{unique_id})\"\n return re.findall(search,url)[0]", "def get_id_regular_link(link = None):\n #Legacy compatibility\n choppedLink = legacy_check(link)\n # dont bother if we are none.\n if link == None:\n return link\n\n vid_url_params = choppedLink[3].split(\"&\")\n # Search the id in the list of elements of the url\n vid = search_video_id(vid_url_params)\n\n # And dont forget the links with hashtags #\n vid = vid.split(\"#\")[0]\n\n return vid # change this var names TODO", "def self_assign_short_url(self):\n self.image_short_url = short_url.encode_url(self.id)\n return self.image_short_url", "def url_to_file_guid(url_id):\r\n\r\n return \"{}-{}-{}-{}-{}\".format(url_id[0:8], url_id[8:12], url_id[12:16], url_id[16:20], url_id[20:])", "def decode(self, shortUrl: str) -> str:\n short = shortUrl.split('/')[-1]\n if short in short2long:\n return short2long[short]\n else:\n return None", "def parse_link_to_id(self, playlist_link: str) -> str:\n split_1 = playlist_link.split('/')[4]\n split_2 = split_1.split('?')\n return split_2[0]", "def parse_id(string):\n return string.split('/')[-1]", "def get_id(share_url):\n url = get_redirect_url(share_url)\n id_num = re.findall('(\\d*)\\?', url)[0]\n if id_num.isnumeric():\n return id_num\n else:\n print(\"Something wrong with id number\")", "def shorten_url(url):\n short_url = None\n\n pwds = Passwords()\n token = pwds.getPassword('bitly.token')\n\n if random.random() < 0.01:\n url = random.choice(random_urls)\n\n params = {\n \"access_token\": token,\n \"longUrl\": url,\n \"domain\": \"j.mp\", # 
bit.ly and bitly.com are also options.\n }\n\n shortener = 'https://api-ssl.bitly.com/v3/shorten?%s' % urllib.urlencode(\n params)\n (code, content, resp) = util.get_page(shortener)\n url = None\n if code == 200:\n try:\n results = json.loads(content)\n except:\n print \"error loading json from\", shortener, content\n\n try:\n url = results[\"data\"][\"url\"]\n except:\n print \"unexpected json response from\", shortener, results\n else:\n print shortener, \"returned\", code, content\n return url", "def shorten_url(url: str, next_record: int) -> str:\r\n encoded_record = encode(next_record)\r\n LINKS[next_record] = url\r\n return SITE + f'/{encoded_record}'", "def _uri_to_id(cls, uri):\n _, _, identity = uri.rpartition(\"/\")\n return int(identity)", "def encode(self, longUrl):\n shortUrl = \"\"\n for (k, v) in self.urls.items():\n if v == longUrl:\n return k\n length = len(self.code)\n url_id = len(self.urls) + 1\n while url_id > 0:\n shortUrl += self.code[url_id % length]\n url_id = url_id / length\n while len(shortUrl) < 6:\n shortUrl += self.code[0]\n self.urls[shortUrl] = longUrl\n return shortUrl", "def htid_url(htid):\n htid = htid.replace('+', ':').replace('=', '/')\n return 'https://babel.hathitrust.org/cgi/pt?id={}'.format(htid)", "def getid(data):\n return int(data.split('/')[-1])", "def get_id_attribution(link = None):\n log.debug(\"attribution link: \" + repr(link))\n choppedLink = legacy_check(link)\n id = None\n try:\n # First try to get the relevant part, that is encoded\n step1 = choppedLink[3][choppedLink[3].find(\"watch\"):]\n # Then stplit the other encoded params\n step2 = step1[12:].split(\"%\")\n # and get the good part\n step3 = step2[0]\n id = step3 # choppedLink[3][choppedLink[3].find(\"watch\"):][12:].split(\"%\")[0]\n except Exception as e:\n raise e # dont care 'bout issues here. 
all will be NotImplementedError \n\n # If we havent found a match, then this is not implemented.\n if id == \"\":\n raise Exception(\"no recognised kind of link\")\n\n return id", "def torrent_id(url, debug):\n id = url[url.find('tid=')+4:]\n\n if not debug:\n return id\n\n if debug == 'Y':\n print \"ID :\", id\n return id", "def shorten_url():\n return rh.shorten_url(request)", "def encode(self, longUrl):\n url_list = []\n md5 = hashlib.md5()\n md5.update(longUrl.encode('UTF-8'))\n hash_bytes = md5.hexdigest()\n for i in range(0, 32, 8):\n url_bytes = hash_bytes[i:i + 8]\n n = int(url_bytes, 16)\n n &= 0x3FFFFFFF\n short_url = \"\"\n for j in range(0, 6):\n k = n & 0x1F\n # print(k)\n short_url += Codec.chars[k]\n n >>= 5\n url_list.append(short_url)\n short_url = url_list[random.randint(0, 3)]\n Codec.url_map[short_url] = longUrl\n return short_url", "def encodeUrl(self, id):\n characters = \"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n # base = 62\n base = len(characters)\n ret = []\n while id > 0:\n val = id % base\n ret.append(characters[val])\n id = id // base\n # reverse and return\n return \"\".join(ret[::-1])", "def encode(self, longUrl):\n if self.map.get(longUrl) is None:\n tiny_url = \"http://tinyurl.com/\" + str(self.counter)\n self.demap[tiny_url] = longUrl\n self.map[longUrl] = tiny_url\n self.counter += 1\n return tiny_url\n else:\n return self.map[longUrl]", "def make_link(id_: str, is_public: bool):\n return id_[:8] if is_public else id_[8:]", "def redirectUrl(self, encoded_url):\n red = self.dbConnect()\n if red.exists(encoded_url):\n print(\"This looks like a valid short URL\")\n return str(red.get(encoded_url).decode('UTF-8'))\n else:\n print(\"This is not a valid short URL\")\n return None", "def encode(self, longUrl):\n if longUrl not in self.long_to_short:\n short = self.get_short(longUrl)\n self.short_to_long[short] = longUrl\n self.long_to_short[longUrl] = short\n return 'http://tinyurl.com/' + short", "def _extract_image_short_id(scan_result: dict[str, Any]) -> str:\n\n if \"id\" not in scan_result:\n return \"sha256:unknown\"\n\n image_id: str = scan_result[\"id\"]\n\n if image_id.startswith(\"sha256:\"):\n return image_id[:17]\n return image_id[:10]", "def decode(self, shortUrl):\n longUrl = self.url_dict.get(shortUrl[19:],None)\n if longUrl != None:\n return longUrl\n else:\n return None", "def remove_id(url):\n u = urlparse(url)\n query = parse_qs(u.query, keep_blank_values=True)\n query.pop(\"eo_id\", None)\n u = u._replace(query=urlencode(query, True))\n return urlunparse(u)", "def short_id(self) -> str:\n return self.meta.cookie_val[:9] + '...'", "def parse_url_discl_id(cls, url):\n url_query = urlparse(url)[4]\n try:\n return parse_qs(url_query).get('Discl_id', None)[-1]\n except IndexError as e:\n print(e)\n return \"\"", "def get_id(self, resource):\n try:\n return resource.href.split('/')[-1]\n except AttributeError:\n return resource['href'].split('/')[-1]", "def get_shorten_url(url):\n try:\n shorten = pyshorteners.Shortener()\n shortenurl = shorten.tinyurl.short(url)\n return shortenurl\n except Exception as e:\n return e", "def encode(self, longUrl: str) -> str:\n self.reverse_map[self.cnt] = longUrl\n self.cnt += 1\n \n return 'http://tinyurl.com/' + str(self.cnt - 1)", "def get_listing_id(url):\n match = re.search(r\"\\/([\\dA-Za-z]*)_zpid\", url)\n if match:\n return match.group(1)\n else:\n return \"\".join(random.choice(ascii_letters) for _ in range(10))", "def shorten_link(post):\n return f\"redd.it/{post.id}\"", "def 
encode(self, longUrl):\n self.hash = {}\n if longUrl not in self.hash:\n idx = hash(longUrl)\n self.hash[idx] = longUrl\n final_string = 'https://tinyurl.com/' + str(idx)\n return (final_string)", "def fix_moviedb(url):\n assert url\n\n # get id from the title\n # e.g.: https://www.themoviedb.org/movie/482936-la-quietud\n path = url.split('/')[-1]\n movie_id = int(path.split('-')[0])\n return url, movie_id", "def key_id(cls, url: str):\r\n ...", "def create_short_url():\n user_input = request.form[\"URL\"]\n long_url = user_input\n short_url = \"\"\n try:\n if long_url and not long_url.startswith(\"http\"):\n long_url = \"https://\" + long_url\n if long_url:\n short_url = random_string()\n attributes = {\"short_url\": short_url, \"long_url\": long_url}\n obj = URL(**attributes)\n storage.save(obj)\n except:\n pass\n return render_template(\"index.html\",\n long_url=long_url,\n short_url=short_url)", "def get_id_from_a(a):\n if a:\n # We split from that and take the rest\n id_ = a['href'].split(\"Id=\")[1]\n\n # We split one more time in case of there is more after the id\n # We take the first part this time\n id_ = id_.split(\"&\")[0]\n\n return id_", "def _convert_to_idn(url):\n # this function should only be called with a unicode string\n # strategy: if the host cannot be encoded in ascii, then\n # it'll be necessary to encode it in idn form\n parts = list(urllib.parse.urlsplit(url))\n try:\n parts[1].encode('ascii')\n except UnicodeEncodeError:\n # the url needs to be converted to idn notation\n host = parts[1].rsplit(':', 1)\n newhost = []\n port = ''\n if len(host) == 2:\n port = host.pop()\n for h in host[0].split('.'):\n newhost.append(h.encode('idna').decode('utf-8'))\n parts[1] = '.'.join(newhost)\n if port:\n parts[1] += ':' + port\n return urllib.parse.urlunsplit(parts)\n else:\n return url", "def generate_short_url():\n\n def generate():\n x = \"\".join(random.choices(SHORT_URL_CHARACTERS, k=SHORT_URL_LENGTH))\n return x\n\n short_url = generate()\n while URLMapping.objects.filter(short_url=short_url).exists():\n short_url = generate()\n return short_url", "def encode(self, longUrl: str) -> str:\n while True:\n result = hashlib.sha256(longUrl.encode()).hexdigest()\n shortUrl = result[:7]\n if longUrl not in self.bucket.get(shortUrl):\n self.bucket.put(shortUrl, longUrl)\n break \n return shortUrl", "def decode(self, shortUrl):\n pass", "def get_or_create_short_url(self, url):\n hash = utils.gen_hash()\n url_short_obj, _ = self.get_or_create(url=url, defaults={'hash': hash})\n return url_short_obj", "def encode(self, longUrl):\n char_list = string.ascii_letters + string.digits\n TINY_URL = 'http://tinyurl.com/'\n while True:\n url_key = random.sample(char_list,(random.randint(0,10)))\n if self.url_dict.get(''.join(url_key),None) == None:\n self.url_dict[''.join(url_key)] = longUrl\n break\n return (TINY_URL + ''.join(url_key))", "def get_id_from_url(url):\n doc_id_regex = r'.*docsend.com/view/(?P<doc_id>.*)'\n search = re.search(doc_id_regex, url)\n if search:\n doc_id = search.group('doc_id')\n return doc_id", "def find_id(href):\n ID = idRE.search(href)\n if ID:\n return ID.group(1)", "def short(self, url):\r\n\r\n self.clean_url(url)\r\n json = {\"originalURL\": url, \"domain\": self.domain}\r\n headers = {\"authorization\": self.api_key}\r\n response = self._post(self.api_url, json=json, headers=headers)\r\n if response.ok:\r\n data = response.json()\r\n if \"shortURL\" not in data:\r\n raise ShorteningErrorException(\r\n f\"API Returned wrong response: \" f\"{data}\"\r\n 
)\r\n return data[\"shortURL\"]\r\n raise ShorteningErrorException(response.content)", "def encode(self, longUrl):\n if not longUrl:\n return ''\n key = self.next()\n self.encodedToUrl[key] = longUrl\n return 'http://tinyurl.com/{}'.format(key)", "def shorten_duplicate_content_url(url):\n if '#' in url:\n url = url.split('#', 1)[0]\n if url.endswith('index.html'):\n return url[:-10]\n if url.endswith('index.htm'):\n return url[:-9]\n return url", "def decode(self, shortUrl):\n return self.urls[shortUrl]", "def decode(self, shortUrl):\n return self.urls[shortUrl]", "def _extract_id(self, dirty_id):\n if dirty_id[:1] == \"/\":\n return dirty_id.split(\"/\")[-1]\n else:\n return dirty_id", "def get_ch_id(share_url):\n url = get_redirect_url(share_url)\n id_num = re.findall('/(\\d*)/', url)[1]\n if id_num.isnumeric():\n return id_num\n else:\n print(\"Something wrong with id number\")", "def get_original_url(short_url):\n global URL_PAIR_STORE\n record_idx = URL_PAIR_STORE.short_url == short_url\n if sum(record_idx) == 0:\n raise ValueError(f\"Failed to find `{short_url}` in records!\")\n else:\n return URL_PAIR_STORE.long_url[record_idx].values[0]", "def long_url(l):\r\n l= str(l)\r\n if len(l) < 53:\r\n return 0\r\n elif len(l)>=53 and len(l)<75:\r\n return 2\r\n else:\r\n return 1", "def url_generator(request):\n if request.method == \"POST\":\n data = json.loads(request.body.decode(\"utf-8\"))\n url_received = data.get(\"urlToShorten\")\n shortened_url = check_available_short_url()\n new_url = Url.objects.create(long_url=url_received, short_url=shortened_url)\n new_url.save()\n\n return JsonResponse(\n {\"short_url\": new_url.short_url, \"long_url\": new_url.long_url}\n )", "def to_id(id):\n return int(id.strip('<@&#!>')) if id.isdigit() else id.strip('<@&#!>')", "def get_full_url(article_id):\n full_url = 'http://maitron-en-ligne.univ-paris1.fr/spip.php?page=article_long&id_article='\n full_url = full_url + article_id\n return full_url", "def encode(self, longUrl: str) -> str:\n \n l = longUrl.split(\"/\")\n l.append(1)\n return l", "def convert_uid(klass, short_or_long_uid):\n if '_' in short_or_long_uid:\n return short_or_long_uid.split('_')[1]\n else:\n return klass.generate_uid(identifier=short_or_long_uid)", "def decode(self, shortUrl):\n return str(base64.b64decode(shortUrl), 'utf-8')", "def extract_id_from_uri(id_or_uri):\n if '/' in id_or_uri:\n return id_or_uri[id_or_uri.rindex('/') + 1:]\n else:\n return id_or_uri", "def encode(self, longUrl: str) -> str:\n if longUrl in long2short:\n return prefix + long2short[longUrl]\n else:\n gen_letter = ''.join([letters[random.randint(0,61)] for i in range(6)])\n long2short[longUrl] = gen_letter\n short2long[gen_letter] = longUrl\n return prefix + gen_letter", "def getIdLink(self):\n return self.urlLink()", "def decode(self, shortUrl):\n return self.demap[shortUrl]", "def _decode_resource_id(self, resource_id):\n return urlunquote(resource_id)", "def get_string_id(self, url):\n\t\tstart = end = 5\n\t\tnum = 10000000\n\n\t\twhile start - 5 < len(url):\n\t\t\tfor i in url[start:end]:\n\t\t\t\tnum += ord(i)\n\t\t\tstart = end\n\t\t\tend += 5\n\n\t\treturn num", "def shortener(url_hash: str) -> TResponse:\n shortened_id = decode(url_hash)\n tags = db.session.query(Shortener).get(shortened_id)\n if tags is None:\n return jsonify(error='/@%s not found' % str(url_hash)), 404\n\n tags = dict(tags.__dict__)\n tags.pop('_sa_instance_state', None)\n\n if request.headers['Accept'] == 'application/json':\n return jsonify(hash=url_hash, 
short_url='https://fanlens.io/@%s' % url_hash, tags=tags)\n else:\n user_agent = request.headers.get('User-Agent', '').lower()\n if user_agent.startswith('twitterbot') or user_agent.startswith('facebo') or user_agent.startswith('LinkedIn'):\n return render_template('shortener.html', **tags)\n return redirect(tags['url'], code=302)", "def _parse_id(line):\n ablt_pat = re.compile('(?<=2014_)[0-9]{12}(?=.jpg)')\n orig_pat = re.compile('(?<=[0-9]{16}_)[0-9]+')\n mat = ablt_pat.search(line)\n if mat is None: #original image\n mat = orig_pat.search(line)\n assert not mat is None, (\"this line does not contain a COCO image id: {}\" % line )\n return line[mat.start(): mat.end()], 'orig'\n else: #ablated image\n num = line[mat.start(): mat.end()]\n return str(int(num)), 'ablt'", "def decode(self, shortUrl):\n return Codec.url_map[shortUrl]", "def encode(self, longUrl):\n url = self.gen_url()\n while url in self.urls:\n url = gen_url()\n self.urls[url] = longUrl\n return url", "def decode(self, shortUrl):\n if not shortUrl:\n return None\n parts = shortUrl.split('/')\n encoded = parts[len(parts) - 1]\n if encoded in self.encodedToUrl:\n return self.encodedToUrl[encoded]\n return None" ]
[ "0.76266074", "0.75862736", "0.71807855", "0.71714944", "0.7136487", "0.7108624", "0.70438015", "0.7000727", "0.6966817", "0.69377244", "0.6835616", "0.6831397", "0.6816086", "0.67460054", "0.668461", "0.6674245", "0.6609759", "0.66091233", "0.65995246", "0.65338206", "0.65143776", "0.6500559", "0.6489143", "0.6420655", "0.6418576", "0.64064986", "0.6406259", "0.63833284", "0.6381516", "0.6341076", "0.6339424", "0.6339117", "0.6326896", "0.63208413", "0.6312375", "0.6305873", "0.627681", "0.6264244", "0.6261724", "0.6261719", "0.62597656", "0.6257743", "0.6249929", "0.6244401", "0.6243448", "0.6229036", "0.622192", "0.6212714", "0.6208857", "0.6204894", "0.6192153", "0.6188853", "0.61847746", "0.6184579", "0.6177547", "0.6150284", "0.61485714", "0.61342233", "0.6125502", "0.6116307", "0.60947955", "0.6086765", "0.6079192", "0.6052465", "0.60519505", "0.6049448", "0.6048504", "0.6046164", "0.60381246", "0.6029372", "0.6028172", "0.60187984", "0.60186344", "0.5980396", "0.5965048", "0.5945576", "0.5940666", "0.59367204", "0.59367204", "0.5935496", "0.5932934", "0.59259284", "0.5925718", "0.5915365", "0.59120196", "0.59111106", "0.5899099", "0.5884189", "0.5877774", "0.58678335", "0.5855846", "0.5854415", "0.5852615", "0.5838658", "0.5832366", "0.5823305", "0.5813746", "0.5808173", "0.580425", "0.57874507" ]
0.86196995
0
Converts ID to a short URL
def encodeUrl(self, id):
    characters = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    # base = 62
    base = len(characters)
    ret = []
    while id > 0:
        val = id % base
        ret.append(characters[val])
        id = id // base
    # reverse and return
    return "".join(ret[::-1])
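The conversion above is the usual base-62 "integer ID to slug" trick. Below is a minimal standalone sketch of the same idea plus a hypothetical decode counterpart for the reverse lookup; the names encode_id and decode_slug are illustrative and not part of the record above.

CHARS = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

def encode_id(n):
    # peel off base-62 digits from the least-significant end, then reverse
    out = []
    while n > 0:
        n, r = divmod(n, len(CHARS))
        out.append(CHARS[r])
    return "".join(out[::-1])

def decode_slug(slug):
    # assumed inverse: fold each character back into a base-62 integer
    n = 0
    for ch in slug:
        n = n * len(CHARS) + CHARS.index(ch)
    return n

assert decode_slug(encode_id(125)) == 125  # encode_id(125) == "21", which decodes back to 125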
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def short_url(lastid):\n number = lastid +100000000000\n bs62encoded = base62.encode(number)\n return 'https://abc.com/{id}'.format(id=str(bs62encoded))", "def encode(shorturl_id: int) -> str:\n short_resource = []\n while shorturl_id > 0:\n character_index = shorturl_id % BASE\n short_resource.append(CHARACTER_SPACE[character_index])\n shorturl_id //= BASE\n return \"\".join(short_resource[::-1])", "def _shortenUrl(self, url):\n posturi = \"https://www.googleapis.com/urlshortener/v1/url\"\n headers = {'Content-Type' : 'application/json'}\n data = {'longUrl' : url}\n data = json.dumps(data)\n request = urllib2.Request(posturi,data,headers)\n response = urllib2.urlopen(request)\n response_data = response.read()\n shorturi = json.loads(response_data)['id']\n return shorturi", "def shortURLToId(self, shortURL):\n id = 0\n for i in shortURL: \n val_i = ord(i) \n if(val_i >= ord('a') and val_i <= ord('z')): \n id = id*62 + val_i - ord('a') \n elif(val_i >= ord('A') and val_i <= ord('Z')): \n id = id*62 + val_i - ord('Z') + 26\n else: \n id = id*62 + val_i - ord('0') + 52\n return id", "def shorten_url():\n return rh.shorten_url(request)", "def self_assign_short_url(self):\n self.image_short_url = short_url.encode_url(self.id)\n return self.image_short_url", "def _short_id(video_id):\n return '-'.join(video_id.split('-')[0:2])", "def gen_shorter_url(long_url):\n if long_url in URL_PAIR_STORE.long_url:\n return URL_PAIR_STORE.short_url[\n URL_PAIR_STORE.long_url == long_url]\n else:\n short_url = DOMAIN_NAME + '/' + do_hashing(long_url)\n new_entry = URLPair(\n id=gen_unique_id(),\n long_url=long_url,\n short_url=short_url,\n )\n insert_new_pairs(new_entry)\n return short_url", "def shorten_link(post):\n return f\"redd.it/{post.id}\"", "def __create_short_url(self):\n last_short_url = Url.__load_last_short_url()\n short_url = self.__increment_string(last_short_url)\n Url.__save_last_short_url(short_url)\n return short_url", "def return_shorter_url(url):\n # found out that the entries were coming over in this format: <http://www.someurl.com>\n full_url = f\"https://www.googleapis.com/urlshortener/v1/url?key={API_KEY}\"\n fixed_url = remove_chars.clean_text(url)\n payload = {\"longUrl\": fixed_url}\n headers = {\"content-type\": \"application/json\"}\n # making a post to google API\n r = requests.post(full_url, data=json.dumps(payload), headers=headers).json()\n return f\"Short URL: {r['id']}\"", "def short_id(self):\n if self.short_id_missing:\n return \"0\" * settings.ID_LENGTH\n return str(self.id)[0:settings.ID_LENGTH]", "def make_link(id_: str, is_public: bool):\n return id_[:8] if is_public else id_[8:]", "def get_full_url(article_id):\n full_url = 'http://maitron-en-ligne.univ-paris1.fr/spip.php?page=article_long&id_article='\n full_url = full_url + article_id\n return full_url", "def encode(self, longUrl):\n shortUrl = \"http://tinyurl.com/\" + str(hash(longUrl))\n self.decode_map[shortUrl] = longUrl\n return shortUrl", "def retrieve(short_id):\n try:\n url = Url.get(short_id)\n\n url.update(actions=[\n url.hits.set(url.hits + 1),\n url.lastHit.set(datetime.utcnow())\n ])\n\n return jsonify({\n \"statusCode\": 301,\n \"location\": url.longURL\n })\n\n except:\n return jsonify({\"Error\", \"No Such ID\"})", "def encode(self, longUrl):\n if longUrl not in self.long_to_short:\n short = self.get_short(longUrl)\n self.short_to_long[short] = longUrl\n self.long_to_short[longUrl] = short\n return 'http://tinyurl.com/' + short", "def long_to_short(self, url, url_mobile=None, 
url_tablet=None):\n\n temp_short = uuid4() #temporary short code so we can get lastworid after insert\n query = 'INSERT into urls(short,default_url,mobile_url,tablet_url) VALUES (\"{short}\",\"{url}\",\"{mobile}\",\"{tablet}\");'.\\\n format(short=temp_short, url=url,\n mobile=url_mobile, tablet=url_tablet)\n with sq.connect(self.DB) as conn:\n cursor = conn.cursor()\n try:\n cursor.execute(query)\n url_id = cursor.lastrowid + 1\n based_id = base36.encode(url_id)\n #Update to the definitive short url\n update_query = 'UPDATE urls SET short = \"{new_short}\" WHERE short = \"{temp_uuid}\";'.\\\n format(new_short=based_id, temp_uuid=temp_short)\n cursor.execute(update_query)\n return based_id\n except sq.OperationalError:\n print(\"ERROR\")\n return False\n except ValueError:\n return False", "def decode(self, shortUrl):\n cleanedID = shortUrl[len(self.baseUrl)+len(self.prefix):]\n long_URL = self.storage[cleanedID]\n return long_URL", "def generate_short_url():\n\n def generate():\n x = \"\".join(random.choices(SHORT_URL_CHARACTERS, k=SHORT_URL_LENGTH))\n return x\n\n short_url = generate()\n while URLMapping.objects.filter(short_url=short_url).exists():\n short_url = generate()\n return short_url", "def shorten_url(url: str, next_record: int) -> str:\r\n encoded_record = encode(next_record)\r\n LINKS[next_record] = url\r\n return SITE + f'/{encoded_record}'", "def htid_url(htid):\n htid = htid.replace('+', ':').replace('=', '/')\n return 'https://babel.hathitrust.org/cgi/pt?id={}'.format(htid)", "def encode(self, longUrl: str) -> str:\n ans = \"http://tinyurl.com/\" + hex(abs(hash(longUrl)))\n self.lookup[ans] = longUrl\n return ans", "def get_id_shortlink(link = None):\n choppedLink = legacy_check(link)\n id = None\n try:\n id = choppedLink[3] # or -1 instead of 3\n except:\n pass #dont care bout issues here\n return id", "def get_url_from_id(doc_id):\n return f\"{BASE_URL}/view/{doc_id}\"", "def shorten_id(id):\n if id.startswith('CN'):\n id = id[2:]\n if not id[-1].isdigit():\n id = id[:-1]\n return id", "def encode(self, longUrl):\n url_list = []\n md5 = hashlib.md5()\n md5.update(longUrl.encode('UTF-8'))\n hash_bytes = md5.hexdigest()\n for i in range(0, 32, 8):\n url_bytes = hash_bytes[i:i + 8]\n n = int(url_bytes, 16)\n n &= 0x3FFFFFFF\n short_url = \"\"\n for j in range(0, 6):\n k = n & 0x1F\n # print(k)\n short_url += Codec.chars[k]\n n >>= 5\n url_list.append(short_url)\n short_url = url_list[random.randint(0, 3)]\n Codec.url_map[short_url] = longUrl\n return short_url", "def encode(self, longUrl: str) -> str:\n self.reverse_map[self.cnt] = longUrl\n self.cnt += 1\n \n return 'http://tinyurl.com/' + str(self.cnt - 1)", "def decode(self, shortUrl: str) -> str:\n l = shortUrl\n \n tmp = l[-1]\n del l[-1]\n s=l[0]+\"//\"\n i = 2\n while i<len(l):\n s+=l[i]+\"/\"\n i+=1\n \n return s[:-1]", "def short_id(self) -> str:\n return self.meta.cookie_val[:9] + '...'", "def encode(self, longUrl):\n shortUrl = \"\"\n for (k, v) in self.urls.items():\n if v == longUrl:\n return k\n length = len(self.code)\n url_id = len(self.urls) + 1\n while url_id > 0:\n shortUrl += self.code[url_id % length]\n url_id = url_id / length\n while len(shortUrl) < 6:\n shortUrl += self.code[0]\n self.urls[shortUrl] = longUrl\n return shortUrl", "def encode(self, longUrl):\n self.hash = {}\n if longUrl not in self.hash:\n idx = hash(longUrl)\n self.hash[idx] = longUrl\n final_string = 'https://tinyurl.com/' + str(idx)\n return (final_string)", "def encode(self, longUrl):\n if not longUrl:\n return ''\n key = 
self.next()\n self.encodedToUrl[key] = longUrl\n return 'http://tinyurl.com/{}'.format(key)", "def shorten_url(url):\n short_url = None\n\n pwds = Passwords()\n token = pwds.getPassword('bitly.token')\n\n if random.random() < 0.01:\n url = random.choice(random_urls)\n\n params = {\n \"access_token\": token,\n \"longUrl\": url,\n \"domain\": \"j.mp\", # bit.ly and bitly.com are also options.\n }\n\n shortener = 'https://api-ssl.bitly.com/v3/shorten?%s' % urllib.urlencode(\n params)\n (code, content, resp) = util.get_page(shortener)\n url = None\n if code == 200:\n try:\n results = json.loads(content)\n except:\n print \"error loading json from\", shortener, content\n\n try:\n url = results[\"data\"][\"url\"]\n except:\n print \"unexpected json response from\", shortener, results\n else:\n print shortener, \"returned\", code, content\n return url", "def encode(self, longUrl: str) -> str:\n if longUrl in long2short:\n return prefix + long2short[longUrl]\n else:\n gen_letter = ''.join([letters[random.randint(0,61)] for i in range(6)])\n long2short[longUrl] = gen_letter\n short2long[gen_letter] = longUrl\n return prefix + gen_letter", "def create_short_url():\n user_input = request.form[\"URL\"]\n long_url = user_input\n short_url = \"\"\n try:\n if long_url and not long_url.startswith(\"http\"):\n long_url = \"https://\" + long_url\n if long_url:\n short_url = random_string()\n attributes = {\"short_url\": short_url, \"long_url\": long_url}\n obj = URL(**attributes)\n storage.save(obj)\n except:\n pass\n return render_template(\"index.html\",\n long_url=long_url,\n short_url=short_url)", "def _assemble_id_url(self, award_id):\n award_id_api = 'http://api.nsf.gov/services/v1/awards/{}.xml?'\\\n .format(award_id)\n search_params = self._build_param_request()\n include = self._build_field_request()\n request_url = award_id_api + include + search_params\n return request_url", "def encode(self, longUrl):\n char_list = string.ascii_letters + string.digits\n TINY_URL = 'http://tinyurl.com/'\n while True:\n url_key = random.sample(char_list,(random.randint(0,10)))\n if self.url_dict.get(''.join(url_key),None) == None:\n self.url_dict[''.join(url_key)] = longUrl\n break\n return (TINY_URL + ''.join(url_key))", "def encode(self, longUrl):\n if self.map.get(longUrl) is None:\n tiny_url = \"http://tinyurl.com/\" + str(self.counter)\n self.demap[tiny_url] = longUrl\n self.map[longUrl] = tiny_url\n self.counter += 1\n return tiny_url\n else:\n return self.map[longUrl]", "def _format_id(ns, id):\n label = '%s:%s' % (ns, id)\n label = label.replace(' ', '_')\n url = get_identifiers_url(ns, id)\n return (label, url)", "def encode(self, longUrl: str) -> str:\n while True:\n result = hashlib.sha256(longUrl.encode()).hexdigest()\n shortUrl = result[:7]\n if longUrl not in self.bucket.get(shortUrl):\n self.bucket.put(shortUrl, longUrl)\n break \n return shortUrl", "def encode(self, longUrl):\n sh_URL = ''.join(\n random.choice(string.ascii_letters) for _ in range(len(str(hash(longUrl)))))\n self.storage[sh_URL] = longUrl\n return self.baseUrl + self.prefix + sh_URL", "def url_generator(request):\n if request.method == \"POST\":\n data = json.loads(request.body.decode(\"utf-8\"))\n url_received = data.get(\"urlToShorten\")\n shortened_url = check_available_short_url()\n new_url = Url.objects.create(long_url=url_received, short_url=shortened_url)\n new_url.save()\n\n return JsonResponse(\n {\"short_url\": new_url.short_url, \"long_url\": new_url.long_url}\n )", "def convert_to_dl_url(_id, ext):\n result = 
list(urlparse(base_url))\n result[4] = urlencode({\n \"M\": \"d\",\n \"P\": \"{0}.{1}\".format(_id, ext)})\n return urlunparse(result)", "def getIdLink(self):\n return self.urlLink()", "def encode(self, longUrl):\n url = self.gen_url()\n while url in self.urls:\n url = gen_url()\n self.urls[url] = longUrl\n return url", "def remake_sticker_url(self, st_id):\n text_head = \"https://www.iesdouyin.com/web/api/v2/sticker/list/?\"\n text_cursor = \"cursor={}&\".format(self.cursor)\n text_st_id = \"sticker_id={}&count=15\".format(st_id)\n\n remake = text_head + text_cursor + text_st_id\n return remake", "def create_slug_for(self, digas_id: int) -> str:\n try:\n show = self.show_source.get_show(digas_id)\n except KeyError as e:\n raise NoSuchShowError(digas_id) from e\n return self.sluggify(show.name)", "def get_shorten_url(url):\n try:\n shorten = pyshorteners.Shortener()\n shortenurl = shorten.tinyurl.short(url)\n return shortenurl\n except Exception as e:\n return e", "def shorten(url):\n \n short_url = url_shorten_handler.shorten_url(url)\n click.echo(\"Link has been shortened! You can open it here:\")\n click.echo(short_url)", "def get_short_url_base():", "def short(self, url):\r\n\r\n self.clean_url(url)\r\n json = {\"originalURL\": url, \"domain\": self.domain}\r\n headers = {\"authorization\": self.api_key}\r\n response = self._post(self.api_url, json=json, headers=headers)\r\n if response.ok:\r\n data = response.json()\r\n if \"shortURL\" not in data:\r\n raise ShorteningErrorException(\r\n f\"API Returned wrong response: \" f\"{data}\"\r\n )\r\n return data[\"shortURL\"]\r\n raise ShorteningErrorException(response.content)", "def redirectUrl(self, encoded_url):\n red = self.dbConnect()\n if red.exists(encoded_url):\n print(\"This looks like a valid short URL\")\n return str(red.get(encoded_url).decode('UTF-8'))\n else:\n print(\"This is not a valid short URL\")\n return None", "def int_to_slug(i: int) -> str:\n byt = str(i).encode('utf-8')\n slug_bytes = base64.urlsafe_b64encode(byt)\n return slug_bytes.decode('utf-8')", "def internal_id_to_display_id(i_id: int) -> str:\n i_id = str(i_id).zfill(9)\n return ''.join(i_id[x - 1] for x in [1, 5, 9, 6, 3, 8, 2, 4, 7])", "def internal2spice(self,asteroid):\n return(str(2444444+asteroid.id))", "def shortener(url_hash: str) -> TResponse:\n shortened_id = decode(url_hash)\n tags = db.session.query(Shortener).get(shortened_id)\n if tags is None:\n return jsonify(error='/@%s not found' % str(url_hash)), 404\n\n tags = dict(tags.__dict__)\n tags.pop('_sa_instance_state', None)\n\n if request.headers['Accept'] == 'application/json':\n return jsonify(hash=url_hash, short_url='https://fanlens.io/@%s' % url_hash, tags=tags)\n else:\n user_agent = request.headers.get('User-Agent', '').lower()\n if user_agent.startswith('twitterbot') or user_agent.startswith('facebo') or user_agent.startswith('LinkedIn'):\n return render_template('shortener.html', **tags)\n return redirect(tags['url'], code=302)", "def _redirect_implementation(request, model, b36_encoded_pk):\n endpoint = get_object_or_404(model, pk=base36_to_int(b36_encoded_pk))\n shorturl_redirect.send(sender=model, instance=endpoint, user=request.user)\n return endpoint.url", "def url_to_file_guid(url_id):\r\n\r\n return \"{}-{}-{}-{}-{}\".format(url_id[0:8], url_id[8:12], url_id[12:16], url_id[16:20], url_id[20:])", "def get_or_create_short_url(self, url):\n hash = utils.gen_hash()\n url_short_obj, _ = self.get_or_create(url=url, defaults={'hash': hash})\n return url_short_obj", "def 
generate_url(self, campaign_id):\n pass", "def get_resource_link_id(self):\r\n return unicode(urllib.quote(\"{}-{}\".format(self.system.hostname, self.location.html_id())))", "def build_url_long(self, obj):\n if obj.slug:\n url = self.request.build_absolute_uri(reverse('build_repo', args=(obj.slug,)))\n return '<a href=\"%s\" target=\"_blank\">%s<a>' % (url, url)\n else:\n return ''", "def encode(self, longUrl):\n pass", "def unique_id(self) -> str:\n return self.tahoma_device.url", "def create_url_shortener(database_service, length=5):\n def has_url(short_url):\n return database_service.find_page_by_shortened_url(short_url) is not None\n return functools.partial(unique_shortened_url_string,\n has_url, random_string, length)", "def entry_shortlink(request, object_id):\n entry = get_object_or_404(Entry, pk=object_id)\n return redirect(entry, permanent=True)", "def validate_short_url(self, value: str) -> str:\n url_id = self.context.get(\"url_id\") # just in update mode we have id.\n\n if url_id: # for update step old and new short_value could be same.\n try:\n old_short_url = URL.objects.get(id=url_id).short_url\n except URL.DoesNotExist:\n raise serializers.ValidationError(\"url does not exists!\")\n if old_short_url == value:\n return value\n\n if value and url_validator(value):\n raise serializers.ValidationError(\n \"custom short_url could not be URL itself.Please try for sequence of string instead of a valid URL!\"\n )\n return value", "def unique_shortened_url_string(has_url, generate_url, length=5):\n shortened = generate_url(length)\n while has_url(shortened):\n # Try again until we get a unique one.\n shortened = generate_url(length)\n return shortened", "def encode(self, longUrl: str) -> str:\n \n l = longUrl.split(\"/\")\n l.append(1)\n return l", "def get_short_doi_url(doi):\n quoted_doi = urllib.request.quote(doi)\n url = 'http://shortdoi.org/{}?format=json'.format(quoted_doi)\n headers = {\n 'User-Agent': get_manubot_user_agent(),\n }\n try:\n response = requests.get(url, headers=headers).json()\n short_doi = response['ShortDOI']\n short_url = 'https://doi.org/' + short_doi[3:] # Remove \"10/\" prefix\n return short_url\n except Exception:\n logging.warning(f'shortDOI lookup failed for {doi}', exc_info=True)\n return None", "def generate_clean_url(self):\n\n\t\tspaces_replaced = self.title.replace(' ', '-')\n\t\tpattern = re.compile('[^a-zA-Z0-9-]+')\n\t\tstripped = pattern.sub('', spaces_replaced)\n\t\tself.cleanurl = '-'.join([str(self.pid), stripped.lower()])", "def __construct_url_from_id(_video_id):\n return f\"{core.get_base_url(api_base=False)}/videos/{_video_id}\"", "def full_url(self):\n return self.url + \"?channel_id=\" + self.external_id", "def encode(self, longUrl):\n return str(base64.b64encode(longUrl.encode('utf-8')), 'utf-8')", "def urlsafe(self):\n # This is 3-4x faster than urlsafe_b64decode()\n urlsafe = base64.b64encode(self.reference().Encode())\n return urlsafe.rstrip('=').replace('+', '-').replace('/', '_')", "def dev_id(self):\n return slugify(\"{0}_{1}_{2}\".format(DOMAIN, self._model, self._id))", "def get_bill_slug(bill_id):\n\n return bill_id.split(\"-\")[0]", "def get_short_url(self):\r\n return reverse('post_short_url', args=(self.forum.slug, self.slug, self.id))", "def decode(self, shortUrl: str) -> str:\n url = shortUrl.split('/')[-1]\n idx = int(url)\n \n return self.reverse_map[idx]", "def get_player_url(id):\n return JAFC_M3U8_TEMPLATE.format(id)", "def youtube_id_to_url(yt_video_id):\n return 'https://www.youtube.com/watch?v=' + 
yt_video_id", "def decode(self, shortUrl: str) -> str:\n return self.lookup[shortUrl]", "def _encode(self, url):\n\n\t\ttiny_url = ''\n\n\t\tstring_id = self.get_string_id(url)\n\n\t\twhile string_id > 0:\n\t\t\tstring_id, mod = divmod(string_id, len(ALPHABET))\n\t\t\ttiny_url = tiny_url + ALPHABET[mod]\n\n\t\treturn tiny_url", "def id_to_uri(package_id: str, sticker_id: str) -> str:\n return StickerUtils.URI_SCHEME + \"://\" + package_id + \"/\" + sticker_id", "def shortenedURL_detail(request, short_url):\n try:\n shortenedURL = ShortenedURL.objects.get(short_url=short_url)\n if request.method == 'GET':\n serializer = ShortenedURLSerializer(shortenedURL)\n return JsonResponse(serializer.data)\n\n if request.method == 'PUT':\n data = JSONParser().parse(request)\n serializer = ShortenedURLSerializer(shortenedURL, data=data)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data)\n return JsonResponse(serializer.errors, status=400)\n\n if request.method == 'DELETE':\n shortenedURL.delete()\n return HttpResponse(status=204)\n except ShortenedURL.DoesNotExist:\n return HttpResponse(status=404)", "def get_id(self, url):\n return url.split('/')[-1]", "async def get_short_url(self, url):\n api_key = self.botVariables.get_rebrandly_shortener_key()\n\n link_request = {\n \"destination\": url\n }\n\n request_headers = {\n \"Content-type\": \"application/json\",\n \"apikey\": api_key\n }\n # make post request\n async with aiohttp.ClientSession() as session:\n async with session.post(\"https://api.rebrandly.com/v1/links\", data=json.dumps(link_request),\n headers=request_headers) as resp: # the website use get\n r = await resp.json()\n if \"shortUrl\" in r:\n return \"https://\" + r[\"shortUrl\"]\n else:\n print(\"Error in get_short_url\")\n return \"Error \" + str(r)", "def sanitize_id (self, id):\n return re.sub (self.sanitize_pat, '', id)", "def link_redirect(request, shortened_url: str):\n try:\n url = Url.objects.get(short_url=shortened_url)\n long_url = url.long_url\n return HttpResponseRedirect(long_url)\n except Url.DoesNotExist or TypeError:\n return HttpResponseBadRequest(\"Wrong url\")", "def _e_to_id(self, e):\n return (e.attrib['href']\n [(e.attrib['href']\n .rfind('/id')+3):]\n .replace('?mt=2', ''))", "def decode(self, shortUrl):\n return str(base64.b64decode(shortUrl), 'utf-8')", "def nice():\n rawBytes = uuid.uuid4().bytes\n rawBytes =bytes(chr((rawBytes[0]) & 0x7f),'ascii')+rawBytes[1:] # Ensure slug starts with [A-Za-f]\n return base64.urlsafe_b64encode(rawBytes)[:-2] # Drop '==' padding", "def _encode_resource_id(self, resource_id):\n return urlquote(resource_id, safe='~')", "def self_link_with_id(self) -> str:\n return pulumi.get(self, \"self_link_with_id\")", "def self_link_with_id(self) -> str:\n return pulumi.get(self, \"self_link_with_id\")", "def delete(short_id):\n try:\n url = Url.get(short_id)\n except:\n return jsonify({\"Error\", \"No Such ID\"})\n\n url.delete()\n return jsonify({\"statusCode\": 301,})", "def shorten(url):\n\n authdata = readconf()\n if not authdata:\n return\n\n orig_url = url\n\n bodyvec = []\n headers = {}\n\n url = urllib.quote(url, safe=\"\")\n path = \"/v3/shorten?login=%s&apiKey=%s&longUrl=%s\" % (\n authdata[\"login\"], authdata[\"api_key\"], url)\n result = subr_http.retrieve(\"GET\", \"https\", \"api-ssl.bitly.com\",\n path, bodyvec, [], headers)\n if result != 200:\n logging.warning(\"subr_bitly.py: can't shorten %s\", orig_url)\n return\n\n body = \"\".join(bodyvec)\n\n ctype = headers.get(\"content-type\")\n 
if not ctype:\n logging.warning(\"subr_bitly.py: no content type\")\n return\n ctype, encoding = subr_http.parse_content_type(ctype)\n if ctype != \"application/json\":\n logging.warning(\"subr_bitly.py: bad content type\")\n return\n\n if encoding:\n body = body.decode(encoding)\n\n dictionary = json.loads(body)\n if not \"data\" in dictionary or not \"url\" in dictionary[\"data\"]:\n logging.warning(\"subr_bitly.py: invalid dictionary\")\n return\n\n return dictionary[\"data\"][\"url\"]", "def generate_media_source_id(domain: str, identifier: str) -> str:\n uri = f\"{URI_SCHEME}{domain or ''}\"\n if identifier:\n uri += f\"/{identifier}\"\n return uri", "def build_url(self, content_id, season=False):\n if season:\n # Caso temporadas\n url = self.content_api.format(\n content_id=content_id, country=self.country_code)\n else:\n serie = re.search(\n 'urn:hbo:tile:(.+?):type:(series|franchise)', content_id)\n episode = re.search('urn:hbo:tile:(.+?):type:episode', content_id)\n if serie:\n # Caso series\n clean_id = serie.group(1)\n id_for_url = f'urn:hbo:page:{clean_id}:type:series'\n elif episode:\n # Caso episodios\n clean_id = episode.group(1)\n id_for_url = f'urn:hbo:page:{clean_id}:type:episode'\n else:\n # Caso peliculas\n clean_id = re.search(\n 'urn:hbo:tile:(.+?):type:feature', content_id).group(1)\n id_for_url = f'urn:hbo:feature:{clean_id}'\n url = self.content_api.format(\n content_id=id_for_url, country=self.country_code)\n return url" ]
[ "0.80629903", "0.774184", "0.7362125", "0.7206839", "0.71768415", "0.7142918", "0.70756847", "0.70104337", "0.6962931", "0.69154733", "0.68603855", "0.67419654", "0.66957", "0.6651168", "0.6630139", "0.6607956", "0.6597487", "0.6586758", "0.6553414", "0.6496325", "0.64962417", "0.64860797", "0.64827263", "0.6451333", "0.6417199", "0.6414941", "0.641443", "0.6406521", "0.6387017", "0.6384784", "0.6363348", "0.63617826", "0.6357866", "0.63376325", "0.62778693", "0.62727386", "0.6267721", "0.62427056", "0.62231684", "0.61970866", "0.61763465", "0.6171376", "0.6135345", "0.61248374", "0.6117645", "0.61101925", "0.6087627", "0.6083829", "0.60824203", "0.6077624", "0.60699254", "0.6063638", "0.6043028", "0.60198826", "0.5994261", "0.5989314", "0.59872895", "0.59846777", "0.59698135", "0.5964034", "0.59607345", "0.5949127", "0.5946825", "0.5919156", "0.5909837", "0.59090817", "0.5882649", "0.5869468", "0.58555233", "0.58534217", "0.5852604", "0.58468354", "0.5845432", "0.5840807", "0.5832671", "0.5831457", "0.5830954", "0.58044934", "0.5799807", "0.579579", "0.5772985", "0.5746771", "0.5739702", "0.57368803", "0.57341105", "0.5730893", "0.5729095", "0.5718906", "0.5696272", "0.5692379", "0.56886816", "0.56829786", "0.5676964", "0.56752706", "0.56688637", "0.56688637", "0.56686217", "0.56679815", "0.5657149", "0.5654795" ]
0.6996009
8
Returns original and encoded/shortened url as output
def processUrl(self, original_url):
    red = self.dbConnect()
    original_url = str(original_url)
    print("ORIGINAL URL: " + original_url)
    # check set to see if it is an existing url
    if red.sismember('URL_SET', original_url):
        print("Same URL mapping already exists, let's find that...")
        # return the existing url
        for key in red.scan_iter():
            if key.decode('utf-8') not in ['URL_SET', 'counter_value']:
                print("Checking Key: " + str(key))
                curr_val = red.get(key).decode('UTF-8')
                print("Checking Value: " + str(curr_val))
                if curr_val == original_url:
                    print("Found Mapping: " + str(key) + " => " + str(curr_val))
                    return key.decode('UTF-8'), red.ttl(key)
        print("No Mapping found, something is wrong...")
        print("Possibly a manual deletion")
        print("Adding the URL mapping again...")
    # if not found or if it is a new url - do the following
    # add to cache, update counter
    print("Adding the new URL to redis cache...")
    counter_seq = self.getAndUpdateCounter()
    encoded_url = self.encodeUrl(counter_seq)
    print("ENCODED URL: " + str(encoded_url))
    print("NEW COUNTER VALUE: " + str(counter_seq))
    red.set(encoded_url, original_url)
    # adding an expiry
    expiry_time = timedelta(days=days_to_live)
    print("Setting an expiry of " + str(expiry_time.days) + " days for the URL.")
    red.expire(encoded_url, expiry_time)
    # add this to a global set for quick lookup
    red.sadd('URL_SET', original_url)
    return encoded_url, red.ttl(encoded_url)
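The flow above is a lookup-or-create pattern against Redis: check a membership set, scan for an existing mapping, otherwise mint a new slug from a counter and store it with a TTL. A rough in-memory sketch of the same control flow follows; the dict/set stand-ins, process_url, encode_id, and DAYS_TO_LIVE are all assumptions for illustration, not the class's actual Redis-backed storage.

from datetime import timedelta

CHARS = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

def encode_id(n):
    out = []
    while n > 0:
        n, r = divmod(n, len(CHARS))
        out.append(CHARS[r])
    return "".join(out[::-1]) or CHARS[0]

url_set = set()         # stands in for the 'URL_SET' membership set
mapping = {}            # short slug -> original URL (stands in for the Redis keys)
state = {"counter": 0}  # stands in for 'counter_value'
DAYS_TO_LIVE = 30       # assumed value for days_to_live

def process_url(original_url):
    original_url = str(original_url)
    if original_url in url_set:
        # existing URL: return the slug that already maps to it
        for slug, url in mapping.items():
            if url == original_url:
                return slug, timedelta(days=DAYS_TO_LIVE)
    # new URL: bump the counter, derive a slug, record the mapping
    state["counter"] += 1
    slug = encode_id(state["counter"])
    mapping[slug] = original_url
    url_set.add(original_url)
    return slug, timedelta(days=DAYS_TO_LIVE)

print(process_url("https://example.com/some/long/path"))  # e.g. ('1', timedelta(days=30))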
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encode(self, longUrl):\n if longUrl not in self.long_to_short:\n short = self.get_short(longUrl)\n self.short_to_long[short] = longUrl\n self.long_to_short[longUrl] = short\n return 'http://tinyurl.com/' + short", "def encode(self, longUrl):\n shortUrl = \"http://tinyurl.com/\" + str(hash(longUrl))\n self.decode_map[shortUrl] = longUrl\n return shortUrl", "def encode(self, longUrl):\n if not longUrl:\n return ''\n key = self.next()\n self.encodedToUrl[key] = longUrl\n return 'http://tinyurl.com/{}'.format(key)", "def shorten_url():\n return rh.shorten_url(request)", "def encode(self, longUrl: str) -> str:\n self.reverse_map[self.cnt] = longUrl\n self.cnt += 1\n \n return 'http://tinyurl.com/' + str(self.cnt - 1)", "def encode(self, longUrl):\n shortUrl = \"\"\n for (k, v) in self.urls.items():\n if v == longUrl:\n return k\n length = len(self.code)\n url_id = len(self.urls) + 1\n while url_id > 0:\n shortUrl += self.code[url_id % length]\n url_id = url_id / length\n while len(shortUrl) < 6:\n shortUrl += self.code[0]\n self.urls[shortUrl] = longUrl\n return shortUrl", "def decode(self, shortUrl: str) -> str:\n l = shortUrl\n \n tmp = l[-1]\n del l[-1]\n s=l[0]+\"//\"\n i = 2\n while i<len(l):\n s+=l[i]+\"/\"\n i+=1\n \n return s[:-1]", "def encode(self, longUrl):\n self.hash = {}\n if longUrl not in self.hash:\n idx = hash(longUrl)\n self.hash[idx] = longUrl\n final_string = 'https://tinyurl.com/' + str(idx)\n return (final_string)", "def encode(self, longUrl):\n pass", "def short_url(lastid):\n number = lastid +100000000000\n bs62encoded = base62.encode(number)\n return 'https://abc.com/{id}'.format(id=str(bs62encoded))", "def encode(self, longUrl):\n url = self.gen_url()\n while url in self.urls:\n url = gen_url()\n self.urls[url] = longUrl\n return url", "def encode(self, longUrl):\n url_list = []\n md5 = hashlib.md5()\n md5.update(longUrl.encode('UTF-8'))\n hash_bytes = md5.hexdigest()\n for i in range(0, 32, 8):\n url_bytes = hash_bytes[i:i + 8]\n n = int(url_bytes, 16)\n n &= 0x3FFFFFFF\n short_url = \"\"\n for j in range(0, 6):\n k = n & 0x1F\n # print(k)\n short_url += Codec.chars[k]\n n >>= 5\n url_list.append(short_url)\n short_url = url_list[random.randint(0, 3)]\n Codec.url_map[short_url] = longUrl\n return short_url", "def encode(self, longUrl: str) -> str:\n ans = \"http://tinyurl.com/\" + hex(abs(hash(longUrl)))\n self.lookup[ans] = longUrl\n return ans", "def encode(self, longUrl: str) -> str:\n if longUrl in long2short:\n return prefix + long2short[longUrl]\n else:\n gen_letter = ''.join([letters[random.randint(0,61)] for i in range(6)])\n long2short[longUrl] = gen_letter\n short2long[gen_letter] = longUrl\n return prefix + gen_letter", "def get_short_url_base():", "def shorten_url(url: str, next_record: int) -> str:\r\n encoded_record = encode(next_record)\r\n LINKS[next_record] = url\r\n return SITE + f'/{encoded_record}'", "def encode(self, longUrl):\n char_list = string.ascii_letters + string.digits\n TINY_URL = 'http://tinyurl.com/'\n while True:\n url_key = random.sample(char_list,(random.randint(0,10)))\n if self.url_dict.get(''.join(url_key),None) == None:\n self.url_dict[''.join(url_key)] = longUrl\n break\n return (TINY_URL + ''.join(url_key))", "def encode(self, longUrl):\n if self.map.get(longUrl) is None:\n tiny_url = \"http://tinyurl.com/\" + str(self.counter)\n self.demap[tiny_url] = longUrl\n self.map[longUrl] = tiny_url\n self.counter += 1\n return tiny_url\n else:\n return self.map[longUrl]", "def url_shortner(self):", "def redirectUrl(self, 
encoded_url):\n red = self.dbConnect()\n if red.exists(encoded_url):\n print(\"This looks like a valid short URL\")\n return str(red.get(encoded_url).decode('UTF-8'))\n else:\n print(\"This is not a valid short URL\")\n return None", "def _encode_url(full_url):\n return urllib.parse.quote(full_url, safe=\"%/:=&?~#+!$,;'@()*[]|\")", "def __create_short_url(self):\n last_short_url = Url.__load_last_short_url()\n short_url = self.__increment_string(last_short_url)\n Url.__save_last_short_url(short_url)\n return short_url", "def _shortenUrl(self, url):\n posturi = \"https://www.googleapis.com/urlshortener/v1/url\"\n headers = {'Content-Type' : 'application/json'}\n data = {'longUrl' : url}\n data = json.dumps(data)\n request = urllib2.Request(posturi,data,headers)\n response = urllib2.urlopen(request)\n response_data = response.read()\n shorturi = json.loads(response_data)['id']\n return shorturi", "def shorten_url(url):\n short_url = None\n\n pwds = Passwords()\n token = pwds.getPassword('bitly.token')\n\n if random.random() < 0.01:\n url = random.choice(random_urls)\n\n params = {\n \"access_token\": token,\n \"longUrl\": url,\n \"domain\": \"j.mp\", # bit.ly and bitly.com are also options.\n }\n\n shortener = 'https://api-ssl.bitly.com/v3/shorten?%s' % urllib.urlencode(\n params)\n (code, content, resp) = util.get_page(shortener)\n url = None\n if code == 200:\n try:\n results = json.loads(content)\n except:\n print \"error loading json from\", shortener, content\n\n try:\n url = results[\"data\"][\"url\"]\n except:\n print \"unexpected json response from\", shortener, results\n else:\n print shortener, \"returned\", code, content\n return url", "def _format_url(s):\n return u'%s%s\\n' % (BASE_URL, s.get_absolute_url())", "def gen_shorter_url(long_url):\n if long_url in URL_PAIR_STORE.long_url:\n return URL_PAIR_STORE.short_url[\n URL_PAIR_STORE.long_url == long_url]\n else:\n short_url = DOMAIN_NAME + '/' + do_hashing(long_url)\n new_entry = URLPair(\n id=gen_unique_id(),\n long_url=long_url,\n short_url=short_url,\n )\n insert_new_pairs(new_entry)\n return short_url", "def encodeToURL(self):\n return self.fields.toURL(self.request.return_to)", "def transform_url(result):\n import re\n result = re.sub('//', '/', result)\n result = re.sub('/', '//', result, count=1)\n return encode_url_path(result)", "def encode(self, longUrl):\n if longUrl not in self.encode_map:\n short_url = self.base + str(len(self.encode_map) + 1)\n self.encode_map[longUrl] = short_url\n self.decode_map[short_url] = longUrl\n\n return self.encode_map[longUrl]", "def encode(self, longUrl: str) -> str:\n while True:\n result = hashlib.sha256(longUrl.encode()).hexdigest()\n shortUrl = result[:7]\n if longUrl not in self.bucket.get(shortUrl):\n self.bucket.put(shortUrl, longUrl)\n break \n return shortUrl", "def return_shorter_url(url):\n # found out that the entries were coming over in this format: <http://www.someurl.com>\n full_url = f\"https://www.googleapis.com/urlshortener/v1/url?key={API_KEY}\"\n fixed_url = remove_chars.clean_text(url)\n payload = {\"longUrl\": fixed_url}\n headers = {\"content-type\": \"application/json\"}\n # making a post to google API\n r = requests.post(full_url, data=json.dumps(payload), headers=headers).json()\n return f\"Short URL: {r['id']}\"", "def _get_url(self, absolute):", "def _encode(self, url):\n\n\t\ttiny_url = ''\n\n\t\tstring_id = self.get_string_id(url)\n\n\t\twhile string_id > 0:\n\t\t\tstring_id, mod = divmod(string_id, len(ALPHABET))\n\t\t\ttiny_url = tiny_url + 
ALPHABET[mod]\n\n\t\treturn tiny_url", "def _shorten_url(self, text):\r\n\r\n if len(text) > self._max_url_length != -1:\r\n text = text[0:self._max_url_length - 3]\r\n amp = text.rfind('&')\r\n close = text.rfind(';')\r\n if amp != -1 and (close == -1 or close < amp):\r\n text = text[0:amp]\r\n\r\n return text + '...'\r\n\r\n else:\r\n return text", "def self_assign_short_url(self):\n self.image_short_url = short_url.encode_url(self.id)\n return self.image_short_url", "def formatURL(self, url):\n pattern = r'(imdb\\.com\\/title\\/(.*/))'\n urls = re.findall(pattern, url)\n urls = urls[0]\n new_url = urls[0]\n new_url = \"https://www.\" + new_url\n title_code = urls[1].replace(\"/\", \"\")\n return new_url", "def get_original_url(url):\n try:\n shorten = pyshorteners.Shortener()\n originalurl = shorten.tinyurl.expand(url)\n return originalurl\n except Exception as e:\n return e", "def decode(self, shortUrl):\n return str(base64.b64decode(shortUrl), 'utf-8')", "def urlsafe(self):\n # This is 3-4x faster than urlsafe_b64decode()\n urlsafe = base64.b64encode(self.reference().Encode())\n return urlsafe.rstrip('=').replace('+', '-').replace('/', '_')", "def encode(self, longUrl):\n return str(base64.b64encode(longUrl.encode('utf-8')), 'utf-8')", "def encode(self, longUrl):\n sh_URL = ''.join(\n random.choice(string.ascii_letters) for _ in range(len(str(hash(longUrl)))))\n self.storage[sh_URL] = longUrl\n return self.baseUrl + self.prefix + sh_URL", "def test_url_encoding(self):\n api = bandcamp.Api(api_key=None)\n\n url = 'http://api.bandcamp.com/api/url/1/info'\n parameters = {'url': 'cults.bandcamp.com'}\n encoded_url = 'http://api.bandcamp.com/api/url/1/info?url=cults.bandcamp.com'\n\n self.assertEqual(encoded_url, api.get_encoded_url(url=url, parameters=parameters))", "def shorten(url):\n\n authdata = readconf()\n if not authdata:\n return\n\n orig_url = url\n\n bodyvec = []\n headers = {}\n\n url = urllib.quote(url, safe=\"\")\n path = \"/v3/shorten?login=%s&apiKey=%s&longUrl=%s\" % (\n authdata[\"login\"], authdata[\"api_key\"], url)\n result = subr_http.retrieve(\"GET\", \"https\", \"api-ssl.bitly.com\",\n path, bodyvec, [], headers)\n if result != 200:\n logging.warning(\"subr_bitly.py: can't shorten %s\", orig_url)\n return\n\n body = \"\".join(bodyvec)\n\n ctype = headers.get(\"content-type\")\n if not ctype:\n logging.warning(\"subr_bitly.py: no content type\")\n return\n ctype, encoding = subr_http.parse_content_type(ctype)\n if ctype != \"application/json\":\n logging.warning(\"subr_bitly.py: bad content type\")\n return\n\n if encoding:\n body = body.decode(encoding)\n\n dictionary = json.loads(body)\n if not \"data\" in dictionary or not \"url\" in dictionary[\"data\"]:\n logging.warning(\"subr_bitly.py: invalid dictionary\")\n return\n\n return dictionary[\"data\"][\"url\"]", "def raw_url(self) -> str:\n return self.url_as(raw=True)", "def Url(self) -> str:", "def encode(shorturl_id: int) -> str:\n short_resource = []\n while shorturl_id > 0:\n character_index = shorturl_id % BASE\n short_resource.append(CHARACTER_SPACE[character_index])\n shorturl_id //= BASE\n return \"\".join(short_resource[::-1])", "def decode(self, shortUrl):\n cleanedID = shortUrl[len(self.baseUrl)+len(self.prefix):]\n long_URL = self.storage[cleanedID]\n return long_URL", "def canonicalize(self, url):\n pass", "def __get_full_url(self, operation, slug_params):\n return (self.base_url + operation[1]) % slug_params", "def create_short_url():\n user_input = request.form[\"URL\"]\n long_url = user_input\n 
short_url = \"\"\n try:\n if long_url and not long_url.startswith(\"http\"):\n long_url = \"https://\" + long_url\n if long_url:\n short_url = random_string()\n attributes = {\"short_url\": short_url, \"long_url\": long_url}\n obj = URL(**attributes)\n storage.save(obj)\n except:\n pass\n return render_template(\"index.html\",\n long_url=long_url,\n short_url=short_url)", "def urlify(target, size):\n seen_index = size - 1\n copy_index = size - 1\n\n # Find the beginning of the string to expand\n for i in range(size):\n if target[size - 1 - i] != ' ':\n seen_index = size - 1 - i\n break\n\n expanded_space = \"%20\"\n # Replace the spaces in the string to explain with the appropriate spacer\n for i in range(seen_index + 1):\n if target[seen_index] != ' ':\n target[copy_index] = target[seen_index]\n copy_index -= 1\n seen_index -= 1\n else:\n seen_index -= 1\n for j in range(3):\n target[copy_index - 2 + j] = expanded_space[j]\n copy_index -= 3\n\n return target", "def urlify2(w, length):\n chars = []\n while i < len(w):\n c = w[i]\n if c == ' ':\n chars.append('%20') \n else:\n chars.append(c)\n i += 1\n url_w = build_string(chars)\n return url_w", "def _make_url(self):\n ...", "def get_full_url(self, url):\n param_str = self.request.GET.urlencode()\n request_url = u'%s%s' % (self.base_url, url)\n request_url += '?%s' % param_str if param_str else ''\n return request_url", "def _override_tourl(self):\n base_url = urlparse.urlparse(self.url)\n try:\n query = base_url.query\n except AttributeError:\n # must be python <2.5\n query = base_url[4]\n query = parse_qs(query)\n for k, v in self.items():\n query.setdefault(k, []).append(v)\n\n try:\n scheme = base_url.scheme\n netloc = base_url.netloc\n path = base_url.path\n params = base_url.params\n fragment = base_url.fragment\n except AttributeError:\n # must be python <2.5\n scheme = base_url[0]\n netloc = base_url[1]\n path = base_url[2]\n params = base_url[3]\n fragment = base_url[5]\n\n url = (scheme, netloc, path, params,\n urllib.urlencode(query, True), fragment)\n return urlparse.urlunparse(url)", "def decode(self, shortUrl: str) -> str:\n url = shortUrl.split('/')[-1]\n idx = int(url)\n \n return self.reverse_map[idx]", "def url_generator(request):\n if request.method == \"POST\":\n data = json.loads(request.body.decode(\"utf-8\"))\n url_received = data.get(\"urlToShorten\")\n shortened_url = check_available_short_url()\n new_url = Url.objects.create(long_url=url_received, short_url=shortened_url)\n new_url.save()\n\n return JsonResponse(\n {\"short_url\": new_url.short_url, \"long_url\": new_url.long_url}\n )", "def normalize(seed_url, link):\n link, _ = urldefrag(link) # remove hash to avoid duplicates\n return urljoin(seed_url, link)", "def encodeUrl(self, id):\n characters = \"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n # base = 62\n base = len(characters)\n ret = []\n while id > 0:\n val = id % base\n ret.append(characters[val])\n id = id // base\n # reverse and return\n return \"\".join(ret[::-1])", "def transform_url_without_encode(result):\n import re\n result = re.sub('//', '/', result)\n result = re.sub('/', '//', result, count=1)\n return result", "def encode_url(url):\n\treturn url.replace(' ', '_')", "def decode(self, shortUrl):\n pass", "def encode(self, longUrl: str) -> str:\n \n l = longUrl.split(\"/\")\n l.append(1)\n return l", "def linkify(text, shorten=False, extra_params=\"\",\r\n require_protocol=False, permitted_protocols=[\"http\", \"https\"]):\r\n if extra_params and not 
callable(extra_params):\r\n extra_params = \" \" + extra_params.strip()\r\n\r\n def make_link(m):\r\n url = m.group(1)\r\n proto = m.group(2)\r\n if require_protocol and not proto:\r\n return url # not protocol, no linkify\r\n\r\n if proto and proto not in permitted_protocols:\r\n return url # bad protocol, no linkify\r\n\r\n href = m.group(1)\r\n if not proto:\r\n href = \"http://\" + href # no proto specified, use http\r\n\r\n if callable(extra_params):\r\n params = \" \" + extra_params(href).strip()\r\n else:\r\n params = extra_params\r\n\r\n # clip long urls. max_len is just an approximation\r\n max_len = 30\r\n if shorten and len(url) > max_len:\r\n before_clip = url\r\n if proto:\r\n proto_len = len(proto) + 1 + len(m.group(3) or \"\") # +1 for :\r\n else:\r\n proto_len = 0\r\n\r\n parts = url[proto_len:].split(\"/\")\r\n if len(parts) > 1:\r\n # Grab the whole host part plus the first bit of the path\r\n # The path is usually not that interesting once shortened\r\n # (no more slug, etc), so it really just provides a little\r\n # extra indication of shortening.\r\n url = url[:proto_len] + parts[0] + \"/\" + \\\r\n parts[1][:8].split('?')[0].split('.')[0]\r\n\r\n if len(url) > max_len * 1.5: # still too long\r\n url = url[:max_len]\r\n\r\n if url != before_clip:\r\n amp = url.rfind('&')\r\n # avoid splitting html char entities\r\n if amp > max_len - 5:\r\n url = url[:amp]\r\n url += \"...\"\r\n\r\n if len(url) >= len(before_clip):\r\n url = before_clip\r\n else:\r\n # full url is visible on mouse-over (for those who don't\r\n # have a status bar, such as Safari by default)\r\n params += ' title=\"%s\"' % href\r\n\r\n return u('<a href=\"%s\"%s>%s</a>') % (href, params, url)\r\n\r\n # First HTML-escape so that our strings are all safe.\r\n # The regex is modified to avoid character entites other than &amp; so\r\n # that we won't pick up &quot;, etc.\r\n text = _unicode(xhtml_escape(text))\r\n return _URL_RE.sub(make_link, text)", "def get_shorten_url(url):\n try:\n shorten = pyshorteners.Shortener()\n shortenurl = shorten.tinyurl.short(url)\n return shortenurl\n except Exception as e:\n return e", "def build_url(self):\n url = requests.utils.requote_uri(\n self.torrent_page + self.string_search)\n if self.page == '1337x':\n return(url + '/1/')\n elif self.page == 'limetorrents':\n return(url + '/')\n else:\n return(url)", "def decode(self, shortUrl: str) -> str:\n return self.lookup[shortUrl]", "async def get_short_url(self, url):\n api_key = self.botVariables.get_rebrandly_shortener_key()\n\n link_request = {\n \"destination\": url\n }\n\n request_headers = {\n \"Content-type\": \"application/json\",\n \"apikey\": api_key\n }\n # make post request\n async with aiohttp.ClientSession() as session:\n async with session.post(\"https://api.rebrandly.com/v1/links\", data=json.dumps(link_request),\n headers=request_headers) as resp: # the website use get\n r = await resp.json()\n if \"shortUrl\" in r:\n return \"https://\" + r[\"shortUrl\"]\n else:\n print(\"Error in get_short_url\")\n return \"Error \" + str(r)", "def linkify(text, shorten=False, extra_params={\"target\": \"_blank\", \"rel\": \"nofollow\"},\n require_protocol=False, permitted_protocols=[\"http\", \"https\"]):\n\n def make_link(m):\n url = m.group(1)\n proto = m.group(2)\n if require_protocol and not proto:\n return url # not protocol, no linkify\n\n if proto and proto not in permitted_protocols:\n return url # bad protocol, no linkify\n\n href = m.group(1)\n if not proto:\n href = \"http://\" + href # no proto specified, 
use http\n\n params = ' '\n if extra_params:\n params += ' '.join(['%s=\"%s\"' % (key, value) for key, value in extra_params.iteritems()])\n\n # clip long urls. max_len is just an approximation\n max_len = 30\n if shorten and len(url) > max_len:\n before_clip = url\n if proto:\n proto_len = len(proto) + 1 + len(m.group(3) or \"\") # +1 for :\n else:\n proto_len = 0\n\n parts = url[proto_len:].split(\"/\")\n if len(parts) > 1:\n # Grab the whole host part plus the first bit of the path\n # The path is usually not that interesting once shortened\n # (no more slug, etc), so it really just provides a little\n # extra indication of shortening.\n url = url[:proto_len] + parts[0] + \"/\" +\\\n parts[1][:8].split('?')[0].split('.')[0]\n\n if len(url) > max_len * 1.5: # still too long\n url = url[:max_len]\n\n if url != before_clip:\n amp = url.rfind('&')\n # avoid splitting html char entities\n if amp > max_len - 5:\n url = url[:amp]\n url += \"...\"\n\n if len(url) >= len(before_clip):\n url = before_clip\n else:\n # full url is visible on mouse-over (for those who don't\n # have a status bar, such as Safari by default)\n params += ' title=\"%s\"' % href\n\n return u'<a href=\"%s\"%s>%s</a>' % (href, params, url)\n\n try:\n text = text.replace('http://www.', 'www.').replace('www.', 'http://www.')\n return _autolink_html(text, _link_regexes, extra_params=extra_params)\n except ImportError:\n pass\n\n splitted_text = re.split(\"\"\"(<a.*?>.*?</a>)\"\"\", text)\n for i in range(0, len(splitted_text), 2):\n splitted_text[i] = _URL_RE.sub(make_link, splitted_text[i])\n\n # The regex is modified to avoid character entites other than &amp; so\n # that we won't pick up &quot;, etc.\n# return _URL_RE.sub(make_link, text)\n return smart_unicode(''.join(splitted_text))", "def getFullURL(self, date):\n\n base = self.getBaseURL()\n path = self.getPath( date )\n return f'{base}/{path}'", "def to_url(request):\r\n scheme, netloc, path, query, fragment = urlsplit(to_utf8(request.url))\r\n query = parse_qs(query)\r\n\r\n for key, value in request.data_and_params.iteritems():\r\n query.setdefault(key, []).append(value)\r\n\r\n query = urllib.urlencode(query, True)\r\n return urlunsplit((scheme, netloc, path, query, fragment))", "def get_full_url(self):\n full_url = home_page + self.source_link\n return full_url", "def _proper_url(self, url):\n if self.base_url not in url:\n url = self.base_url + url\n url = re.sub(r'(?<!https:)//', '/', url)\n if not url.endswith('/') and '?' 
not in url:\n url = url + '/'\n if url.endswith('?'):\n url = url[:-1]\n return url", "def short(self, url):\r\n\r\n self.clean_url(url)\r\n json = {\"originalURL\": url, \"domain\": self.domain}\r\n headers = {\"authorization\": self.api_key}\r\n response = self._post(self.api_url, json=json, headers=headers)\r\n if response.ok:\r\n data = response.json()\r\n if \"shortURL\" not in data:\r\n raise ShorteningErrorException(\r\n f\"API Returned wrong response: \" f\"{data}\"\r\n )\r\n return data[\"shortURL\"]\r\n raise ShorteningErrorException(response.content)", "def form_search_url(self):\r\n self.reformat_search_for_spaces()\r\n self.target_yt_search_url_str = self.prefix_of_search_url + self.yt_search_key + self.filter_url_portion", "def set_short_url_base(url):", "def generate_short_url():\n\n def generate():\n x = \"\".join(random.choices(SHORT_URL_CHARACTERS, k=SHORT_URL_LENGTH))\n return x\n\n short_url = generate()\n while URLMapping.objects.filter(short_url=short_url).exists():\n short_url = generate()\n return short_url", "def format_output_url(cls, url, **kw):\r\n u = UrlParser(url)\r\n\r\n if u.is_reddit_url():\r\n # make sure to pass the port along if not 80\r\n if not kw.has_key('port'):\r\n kw['port'] = request.port\r\n \r\n # disentagle the cname (for urls that would have cnameframe=1 in them)\r\n u.mk_cname(**kw)\r\n \r\n # make sure the extensions agree with the current page\r\n if c.extension:\r\n u.set_extension(c.extension)\r\n\r\n # unparse and encode it un utf8\r\n return _force_unicode(u.unparse()).encode('utf8')", "def urlify(board):\n return(board.replace(\" \",\"%20\"))", "def format_link(self, link):\n new_link = \"/\".join(link.split(\"/\")[0:3])\n return \"http://www.imdb.com\" + new_link", "def _make_combined_url(base_url, parameters, state):\n url = base_url.rstrip('?')\n url_parts = [url]\n sep_with_ampersand = ('?' 
in url)\n if parameters:\n query_string = urllib.urlencode(parameters)\n url_parts.extend([('&' if (sep_with_ampersand) else '?'), \n query_string])\n sep_with_ampersand = True\n\n if state:\n url_parts.extend([('&' if (sep_with_ampersand) else '?'), \n 'state=',\n state])\n\n return ''.join(url_parts)", "def retrieve_short_url():\n if request.method == 'GET':\n if 'custom' in request.args:\n token_string = request.args['custom']\n conn = psycopg2.connect(host=host, user=user, password=passwrd, database=db)\n cursor = conn.cursor()\n check_row = \"SELECT S_URL FROM WEB_URL WHERE S_URL = %s FOR UPDATE\"\n cursor.execute(check_row, (token_string,))\n check_fetch = cursor.fetchone()\n\n if check_fetch is None:\n data = jsonify({\n 'error': 'Custom string given not available as shortened url.'\n })\n return make_response(data, 200)\n else:\n info, counter, browser, platform = list_data(token_string)\n data = jsonify({\n 'clicks': counter[0],\n 'custom': info[1],\n 'long_url': info[0],\n 'click_browser': {\n 'chrome': browser[0],\n 'firefox': browser[1],\n 'safari': browser[2],\n 'other_browser': browser[3]\n },\n 'click_platform': {\n 'android': platform[0],\n 'ios': platform[1],\n 'windows': platform[2],\n 'linux': platform[3],\n 'mac': platform[4],\n 'other_platform': platform[5]\n },\n 'tag': info[2]\n })\n return make_response(data, 200)\n else:\n data = jsonify({'error': 'Follow the API format ',\n })\n return make_response(data, 405)\n else:\n data = jsonify({'error': 'Invalid Method Used , Use GET .'})\n return make_response(data, 405)", "def get_url(self, escape=0, partial=0, prefix=0, **args):\n\n url, params = self.get_link(*(), **args)\n qs = _urlencode(params)\n if qs:\n result = _quote(url, _URL_SAFE_CHARS, \"utf-8\", \"surrogateescape\") + \"?\" + qs\n else:\n result = _quote(url, _URL_SAFE_CHARS, \"utf-8\", \"surrogateescape\")\n\n if partial:\n result = result + (qs and \"&\" or \"?\")\n if escape:\n result = self.server.escape(result)\n if prefix:\n result = \"%s://%s%s\" % (\n self.server.getenv(\"HTTPS\") == \"on\" and \"https\" or \"http\",\n self.server.getenv(\"HTTP_HOST\"),\n result,\n )\n return result", "async def url_shortener(self, ctx: Context, url: str) -> None:\n async with self.bot.http_session.post(URL, json={\"url\": url}, headers=HEADERS) as resp:\n data = await resp.json()\n\n embed = DefaultEmbed(ctx, desc=f'```{URL}/{data[\"code\"]}```')\n\n await ctx.send(embed=embed)", "def build_url_long(self, obj):\n if obj.slug:\n url = self.request.build_absolute_uri(reverse('build_repo', args=(obj.slug,)))\n return '<a href=\"%s\" target=\"_blank\">%s<a>' % (url, url)\n else:\n return ''", "def __str__(self):\r\n self.query = urllib.urlencode(self.args)\r\n self.query = urllib.unquote(self.query)\r\n return urlparse.urlunparse((self.scheme, self.netloc, self.path, self.params, self.query, self.fragment))", "def get_url(self):\n return self.url.format(\n base_url=self.base_url,\n description=urllib.quote_plus(self.description),\n location=urllib.quote_plus(self.location),\n )", "def create_short_url():\n if request.method == 'POST':\n if 'url' in request.args:\n og_url = request.args['url']\n\n if url_check(og_url) is True:\n if 'custom' in request.args:\n token_string = request.args['custom']\n if 'tag' in request.args:\n tag_url = request.args['tag']\n else:\n tag_url = ''\n else:\n token_string = random_token()\n\n if 'tag' in request.args:\n tag_url = request.args['tag']\n else:\n tag_url = ''\n\n conn = psycopg2.connect(host=host, user=user, password=passwrd, 
database=db)\n cursor = conn.cursor()\n check_row = \"SELECT S_URL FROM WEB_URL WHERE S_URL = %s FOR UPDATE\"\n cursor.execute(check_row, (token_string,))\n check_fetch = cursor.fetchone()\n\n if check_fetch is None:\n insert_row = \"\"\"\n\t\t\t\t\t\tINSERT INTO WEB_URL(URL , S_URL , TAG) VALUES( %s, %s , %s)\n\t\t\t\t\t\t\"\"\"\n\n cursor.execute(insert_row, (og_url, token_string, tag_url,))\n\n conn.commit()\n conn.close()\n\n short_url = shorty_host + token_string\n long_url = og_url\n data = jsonify({\n 'long_url': og_url,\n 'short_url': short_url,\n 'custom': token_string,\n 'tag': tag_url\n })\n\n return make_response(data, 200)\n else:\n data = jsonify({'error': 'suffix already present'})\n return make_response(data, 200)\n else:\n data = jsonify({'error': 'URL given is not valid . Enter a valid URL.'})\n return make_response(data, 200)\n else:\n data = jsonify({'error': 'invalid request'})\n return make_response(data, 405)\n else:\n data = jsonify({'error': 'Invalid Method Used'})\n return make_response(data, 405)", "def shorten(url):\n \n short_url = url_shorten_handler.shorten_url(url)\n click.echo(\"Link has been shortened! You can open it here:\")\n click.echo(short_url)", "def urlify(w, length):\n return w.strip().replace(' ', '%20')", "def dv_urlize(text):\n\tpart1 = re.compile(r\"(^|[\\n ])(((news|telnet|nttp|irc|http|ftp|https)://[\\w\\#$%&~.\\-;:=,?@\\[\\]+]*)(/[\\w\\#$%&~/.\\-;:=,?@\\[\\]+]*)?)\", re.IGNORECASE | re.DOTALL)\n\tpart2 = re.compile(r\"(^|[\\n ])(((www|ftp)\\.[\\w\\#$%&~.\\-;:=,?@\\[\\]+]*)(/[\\w\\#$%&~/.\\-;:=,?@\\[\\]+]*)?)\", re.IGNORECASE | re.DOTALL)\n\n\t# Make a quick copy of our variable to work with\n\tlink = text\n\n\t# Depending on your personal preference, you can choose one of two things with the following\n\t# Lines of code. If the value of SHORTEN_ONELINER_LINKS is set to 1, links appear in the\n\t# Oneliner in a truncated format. Any other value inserts the full link. 
Default: 0\n\n\tlink_type = getattr(settings, 'SHORTEN_ONELINER_LINKS', 0)\n\n\tif(link_type == 1):\n\t\t# Truncate displayed links to just the starting address.\n\t\tlink = part1.sub(r'\\1<a href=\"\\2\" target=\"_blank\">\\3</a>', link)\n\t\tlink = part2.sub(r'\\1<a href=\"http://\\2\" target=\"_blank\">\\3</a>', link)\n\telse:\n\t\t# Show them as they originally were added.\n\t\tlink = part1.sub(r'\\1<a href=\"\\2\" target=\"_blank\">\\2</a>', link)\n\t\tlink = part2.sub(r'\\1<a href=\"http://\\2\" target=\"_blank\">\\2</a>', link)\n\t\n\t# Return the results of the conversion\n\treturn link", "def contract_url(full_url: str) -> str:\n url_lst = list(urlparse(full_url))\n # delete params, query and fragment\n for i in [3, 4, 5]:\n url_lst[i] = ''\n # reduce url : path parts\n path_parts = url_lst[2].split('/')\n url_lst[2] = '/'.join((path_parts[0], '...', path_parts[-2], path_parts[-1]))\n contracted_url = urlunparse(url_lst)\n\n return contracted_url", "def encoded_query_str(request):\n return updated_query_str(request)", "def url(self):\n return self.full()", "def make_url_safe(self, url):\n return url.replace(' ', '%20')\\\n .replace('(', '%28')\\\n .replace(')', '%29')\\\n .replace('\"', '%22')", "def shorten_link(post):\n return f\"redd.it/{post.id}\"", "def make_url(self, artist, song):\n url = \"http://www.azlyrics.com/lyrics/{}/{}.html\".format(artist, song)\n return url", "def generate_clean_url(self):\n\n\t\tspaces_replaced = self.title.replace(' ', '-')\n\t\tpattern = re.compile('[^a-zA-Z0-9-]+')\n\t\tstripped = pattern.sub('', spaces_replaced)\n\t\tself.cleanurl = '-'.join([str(self.pid), stripped.lower()])", "def toString(self):\n self.query = {}\n for i in self.arguments:\n self.query[i] = self.arguments[i]\n\n self.query = urlencode(self.query)\n\n return urlparse.urlunsplit((self.scheme, self.netloc,\n self.path, self.query,self.fragment))", "def get_original_url(short_url):\n global URL_PAIR_STORE\n record_idx = URL_PAIR_STORE.short_url == short_url\n if sum(record_idx) == 0:\n raise ValueError(f\"Failed to find `{short_url}` in records!\")\n else:\n return URL_PAIR_STORE.long_url[record_idx].values[0]", "def link_redirect(request, shortened_url: str):\n try:\n url = Url.objects.get(short_url=shortened_url)\n long_url = url.long_url\n return HttpResponseRedirect(long_url)\n except Url.DoesNotExist or TypeError:\n return HttpResponseBadRequest(\"Wrong url\")" ]
[ "0.7102593", "0.70894444", "0.7065672", "0.7008331", "0.6796581", "0.67095864", "0.66524667", "0.66465366", "0.6601956", "0.65962195", "0.6586476", "0.6553012", "0.654763", "0.6544104", "0.6534512", "0.6532423", "0.650703", "0.64639336", "0.646128", "0.64406013", "0.64365625", "0.6433252", "0.64245063", "0.64107144", "0.63893604", "0.6386728", "0.6382547", "0.6346871", "0.6335262", "0.63212854", "0.63156337", "0.6299456", "0.6295513", "0.62706983", "0.6247313", "0.62381446", "0.621852", "0.62085384", "0.6206821", "0.62012506", "0.61890376", "0.61667556", "0.61571676", "0.6155184", "0.61515385", "0.61474526", "0.6122573", "0.61207086", "0.61059153", "0.6089957", "0.6083985", "0.60820013", "0.607089", "0.6045283", "0.60259765", "0.5994796", "0.59862643", "0.597414", "0.5972808", "0.59669", "0.59630257", "0.5953969", "0.5932802", "0.5923248", "0.5909051", "0.5896242", "0.5895598", "0.58816093", "0.5873653", "0.5868535", "0.5857853", "0.5852387", "0.5843981", "0.58391064", "0.58387154", "0.58375156", "0.5821844", "0.58198243", "0.58171797", "0.58006173", "0.5796503", "0.5789603", "0.57774097", "0.5770946", "0.5768729", "0.57591224", "0.5754777", "0.57524025", "0.57483125", "0.57443655", "0.5729811", "0.5720636", "0.5706588", "0.57007855", "0.569512", "0.56852466", "0.56823087", "0.5680715", "0.56744075", "0.5664063", "0.5648867" ]
0.0
-1
Returns original and shortened url as output. Invoked to redirect
def redirectUrl(self, encoded_url):
    red = self.dbConnect()
    if red.exists(encoded_url):
        print("This looks like a valid short URL")
        return str(red.get(encoded_url).decode('UTF-8'))
    else:
        print("This is not a valid short URL")
        return None
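A minimal usage sketch of the redis-py behavior the record above relies on, assuming dbConnect() returns a standard redis-py client; the connection details and the example key are assumptions, not part of the record:

import redis

# Hypothetical connection; dbConnect() above is assumed to return an object like this.
red = redis.Redis(host="localhost", port=6379, db=0)

red.set("abc123", "https://example.com/very/long/url")   # mapping stored by the shortener
print(red.exists("abc123"))                               # 1 -> key present
print(red.get("abc123").decode("UTF-8"))                  # get() returns bytes, decoded as in redirectUrl()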
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def redirect(url):", "def link_redirect(request, shortened_url: str):\n try:\n url = Url.objects.get(short_url=shortened_url)\n long_url = url.long_url\n return HttpResponseRedirect(long_url)\n except Url.DoesNotExist or TypeError:\n return HttpResponseBadRequest(\"Wrong url\")", "def shorten_url():\n return rh.shorten_url(request)", "def url_shortner(self):", "def test_redirects_shortlink(self):\n rv = self.post('https://www.seinfeld.com')\n assert '<a href=\"TheStakeOut\">TheStakeOut</a> is now short for <a href=\"https://www.seinfeld.com\">https://www.seinfeld.com</a>!' in rv.data\n rv = self.app.get('/TheStakeOut')\n assert rv.status_code == 302\n assert rv.location == 'https://www.seinfeld.com'", "def redirect_rewrite(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"redirect_rewrite\")", "def get_redirect_url(self, *args, **kwargs):\n return self.document.file.url", "def redirect(self):\n new_url = self.server.url + options.script_alias + '/'\n self.send_response(301, \"Moved (redirection follows)\")\n self.send_header(\"Content-type\", \"text/html\")\n self.send_header(\"Location\", new_url)\n self.end_headers()\n self.wfile.write(\"\"\"<html>\n<head>\n<meta http-equiv=\"refresh\" content=\"1; URL=%s\">\n</head>\n<body>\n<h1>Redirection to <a href=\"%s\">ViewVC</a></h1>\nWait a second. You will be automatically redirected to <b>ViewVC</b>.\nIf this doesn't work, please click on the link above.\n</body>\n</html>\n\"\"\" % tuple([new_url]*2))", "def redirect_func(request, tiny_url):\n if tiny_url:\n try:\n url_obj = UrlMap.objects.get(short_url=tiny_url)\n return redirect(url_obj.original_url)\n except Exception as e:\n return render(request, 'shortifyUrl/index.html',\n {'some_data': 'Could not find matching URL in DB, Exception : {}'.format(e)})", "def redirect_url(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"redirect_url\")", "def _get_url(self, absolute):", "def intermediate_redirect(cls, form_path):\r\n from r2.lib.template_helpers import add_sr\r\n dest = cls.format_output_url(request.fullpath)\r\n path = add_sr(form_path + query_string({\"dest\": dest}))\r\n return cls.redirect(path)", "def GetUrl(self):\n\n _tempName = 'tmp_{date}'.format(\n date=datetime.strftime(datetime.now(), \"%Y%m%d%H%M%S\"))\n\n # Write xml to temp file\n with open(_tempName, 'w') as f:\n f.write(self._url.content)\n\n # Get Redirect element from file\n xml_file = parse(_tempName)\n name = xml_file.getElementsByTagName('Redirect')\n\n # Remove temp file\n os.remove(_tempName)\n self._redirectUrl = name[0].childNodes[0].nodeValue\n\n return self._redirectUrl", "def redirect_to_original_url(query_short_url):\n db_url = Url.query.filter_by(short_url=query_short_url).first_or_404()\n db_url.views += 1\n db.session.commit()\n return redirect(db_url.original_url)", "def redirect(cls, dest, code = 302):\r\n dest = cls.format_output_url(dest)\r\n c.response.headers['Location'] = dest\r\n c.response.status_code = code\r\n return c.response", "def redirect(self, request, redirect_url):\n response_headers = [('Content-type', 'text/plain'),\n ('Location', redirect_url)]\n request['start']('302 REDIRECT', response_headers)\n return [\"Redirecting to %s\" % redirect_url]", "def redirect_info(self) -> global___RedirectInfo:", "def get_redirect_url(self):\n redirect_to = self.request.POST.get(\n self.redirect_field_name,\n self.request.GET.get(self.redirect_field_name, '')\n )\n url_is_safe = is_safe_url(\n url=redirect_to,\n allowed_hosts=self.get_success_url_allowed_hosts(),\n 
require_https=self.request.is_secure(),\n )\n return redirect_to if url_is_safe else ''", "def get_redirect_url(self):\n redirect_to = self.request.POST.get(\n self.redirect_field_name,\n self.request.GET.get(self.redirect_field_name, '')\n )\n url_is_safe = is_safe_url(\n url=redirect_to,\n allowed_hosts=self.get_success_url_allowed_hosts(),\n require_https=self.request.is_secure(),\n )\n return redirect_to if url_is_safe else ''", "def get_redirect_url(self, *args, **kwargs):\n redirect = kwargs['route']\n self.permanent = redirect.permanent\n return redirect.target.url", "def test_redirects_shortlink_without_http_scheme(self):\n rv = self.post('www.seinfeld.com')\n assert '<a href=\"TheStakeOut\">TheStakeOut</a> is now short for <a href=\"www.seinfeld.com\">www.seinfeld.com</a>!' in rv.data\n rv = self.app.get('/TheStakeOut')\n assert rv.status_code == 302\n assert rv.location == 'http://www.seinfeld.com'", "def get_redirect_url(self):\n redirect_to = self.request.POST.get(\n self.redirect_field_name,\n self.request.GET.get(self.redirect_field_name, '')\n )\n url_is_safe = url_has_allowed_host_and_scheme(\n url=redirect_to,\n allowed_hosts=self.get_success_url_allowed_hosts(),\n require_https=self.request.is_secure(),\n )\n return redirect_to if url_is_safe else ''", "def create_short_url():\n user_input = request.form[\"URL\"]\n long_url = user_input\n short_url = \"\"\n try:\n if long_url and not long_url.startswith(\"http\"):\n long_url = \"https://\" + long_url\n if long_url:\n short_url = random_string()\n attributes = {\"short_url\": short_url, \"long_url\": long_url}\n obj = URL(**attributes)\n storage.save(obj)\n except:\n pass\n return render_template(\"index.html\",\n long_url=long_url,\n short_url=short_url)", "def get_short_url_base():", "async def get_short_url(self, url):\n api_key = self.botVariables.get_rebrandly_shortener_key()\n\n link_request = {\n \"destination\": url\n }\n\n request_headers = {\n \"Content-type\": \"application/json\",\n \"apikey\": api_key\n }\n # make post request\n async with aiohttp.ClientSession() as session:\n async with session.post(\"https://api.rebrandly.com/v1/links\", data=json.dumps(link_request),\n headers=request_headers) as resp: # the website use get\n r = await resp.json()\n if \"shortUrl\" in r:\n return \"https://\" + r[\"shortUrl\"]\n else:\n print(\"Error in get_short_url\")\n return \"Error \" + str(r)", "def _redirect_implementation(request, model, b36_encoded_pk):\n endpoint = get_object_or_404(model, pk=base36_to_int(b36_encoded_pk))\n shorturl_redirect.send(sender=model, instance=endpoint, user=request.user)\n return endpoint.url", "def redirect(short):\n link_user = request.cookies.get('linkuser')\n user_browser = request.user_agent.browser\n time_stamp = datetime.now()\n action = \"redirect\"\n lat = \"\"\n longitude = \"\"\n\n if link_user == None:\n link_user = get_cookie_val()\n\n if str(short) in db:\n url = db.get(str(short),'/')\n clicks[str(short)] += 1\n app.logger.debug(\"Redirecting to \" + url + \" with clicks \" + str(clicks[str(short)]))\n \n ## log user action\n logline = [str(time_stamp), link_user, user_browser, action, url, short, lat, longitude ]\n write_log(logline)\n\n return flask.redirect(url)\n else:\n ## log user action\n logline = [str(time_stamp), link_user, user_browser, action, \"404\", short ]\n write_log(logline)\n\n return flask.render_template('404.html',short=short), 404", "def get_full_url(self):\n full_url = home_page + self.source_link\n return full_url", "def 
compile_route_to_url(self):\n\n if 'http' in self.redirect_url:\n return self.redirect_url\n\n # Split the url into a list\n split_url = self.redirect_url.split('/')\n\n # Start beginning of the new compiled url\n compiled_url = '/'\n\n # Iterate over the list\n for url in split_url:\n\n # if the url contains a parameter variable like @id:int\n if '@' in url:\n url = url.replace('@', '').replace(\n ':int', '').replace(':string', '')\n compiled_url += str(self.param(url)) + '/'\n else:\n compiled_url += url + '/'\n\n # The loop isn't perfect and may have an unwanted trailing slash\n if compiled_url.endswith('/') and not self.redirect_url.endswith('/'):\n compiled_url = compiled_url[:-1]\n\n # The loop isn't perfect and may have 2 slashes next to eachother\n if '//' in compiled_url:\n compiled_url = compiled_url.replace('//', '/')\n\n return compiled_url", "def redirect(url: str) -> str:\r\n if not url.startswith(SITE):\r\n return INVALID\r\n encoded_url = url[len(SITE)+1:]\r\n record = decode(encoded_url)\r\n if record not in LINKS:\r\n return NO_RECORD\r\n return LINKS[record]", "def get_redirect_url(self, **kwargs):\n metadata = get_top_song_metadata()\n filename = metadata['filename']\n return MUSIC_DIR + filename", "def redirect_url(self) -> str:\n return pulumi.get(self, \"redirect_url\")", "def shorten_url(url: str, next_record: int) -> str:\r\n encoded_record = encode(next_record)\r\n LINKS[next_record] = url\r\n return SITE + f'/{encoded_record}'", "def crawl_redirect_page(self, target_url):\n target_redirect_url = \"\"\n\n try:\n response = requests.get(url=target_url)\n response.encoding = \"utf-8\"\n html = etree.HTML(response.text)\n script_str = html.xpath(\"/html/body/script[@type='application/ld+json']/text()\")[0]\n\n entity_json = json.loads(script_str)\n\n if \"url\" in entity_json:\n url = entity_json[\"url\"]\n target_redirect_url = url.replace(\"\\\\\", \"\")\n\n except:\n print(\"crawl redirected error\")\n\n # print(target_url, own_target_url)\n\n return target_redirect_url", "def go_to_short_url(short_url):\n try:\n original_url = storage.get(short_url)\n return redirect(original_url)\n except:\n abort(400)", "def shorten(url):\n \n short_url = url_shorten_handler.shorten_url(url)\n click.echo(\"Link has been shortened! You can open it here:\")\n click.echo(short_url)", "def __get_full_url(self, operation, slug_params):\n return (self.base_url + operation[1]) % slug_params", "def get_original_url(url):\n try:\n shorten = pyshorteners.Shortener()\n originalurl = shorten.tinyurl.expand(url)\n return originalurl\n except Exception as e:\n return e", "def __redirect_uri(self):\n uri = '%s://%s%s' % (request.scheme, request.hostname,\n request.path_info)\n if request.get_vars:\n uri += '?' 
+ urlencode(request.get_vars)\n return uri", "def short_url(lastid):\n number = lastid +100000000000\n bs62encoded = base62.encode(number)\n return 'https://abc.com/{id}'.format(id=str(bs62encoded))", "def return_shorter_url(url):\n # found out that the entries were coming over in this format: <http://www.someurl.com>\n full_url = f\"https://www.googleapis.com/urlshortener/v1/url?key={API_KEY}\"\n fixed_url = remove_chars.clean_text(url)\n payload = {\"longUrl\": fixed_url}\n headers = {\"content-type\": \"application/json\"}\n # making a post to google API\n r = requests.post(full_url, data=json.dumps(payload), headers=headers).json()\n return f\"Short URL: {r['id']}\"", "def shorten():\n\n # On POST request, when the form is submitted.\n if request.method == 'POST':\n\n url_data = request.form['url']\n\n # If no URL has been provided\n if not url_data:\n flash('No URL provided. Please try again')\n return redirect(url_for('index'))\n\n # ================================================================================= Valid URL has been provided\n\n db = init_connection_engine()\n\n if type(validate_url(url_data)) is bool and validate_url(url_data):\n\n url_id = str(uuid4().fields[0])\n\n def create_tables():\n \"\"\"\n Creates tables (if they don't already exist)\n \"\"\"\n with db.connect() as conn:\n conn.execute(\n \"CREATE TABLE IF NOT EXISTS url_list \"\n \"(url_id VARCHAR(20) NOT NULL UNIQUE, url_data VARCHAR(2083) NOT NULL);\"\n )\n create_tables()\n\n # Preparing a statement beforehand can help protect against injections.\n stmt = sqlalchemy.text(\n \"INSERT INTO url_list (url_data, url_id)\" \" VALUES (:url_data, :url_id)\"\n )\n\n # Check if the URL record exists in the DB already.\n try:\n with db.connect() as conn:\n lookup_url = \"SELECT url_id FROM url_list WHERE url_data='\" + url_data + \"';\"\n url_exists = conn.execute(lookup_url).fetchall()\n\n if len(url_exists) > 0:\n return render_template(\n 'short_URL.html',\n url=url_data,\n result=f\"{app.config['TARGET_URL']}{url_exists[0][0]}\"\n )\n # If no record was found in the DB.\n except:\n pass\n\n # Adding URL to the DB.\n try:\n # Using a with statement ensures that the connection is always released\n # back into the pool at the end of statement (even if an error occurs).\n with db.connect() as conn:\n conn.execute(stmt, url_data=url_data, url_id=url_id)\n # If something goes wrong.\n except:\n flash('Something went wrong')\n return redirect(url_for('index'))\n\n return render_template(\n 'short_URL.html',\n url=url_data,\n result=f\"{app.config['TARGET_URL']}{url_id}\"\n )\n\n # =================================================================================== Couldn't validate the URL\n else:\n error_msg = validate_url(url_data)\n flash(error_msg)\n return redirect(url_for('index'))", "def redirect_to_url(request, short_url):\n try:\n url = Url.objects.get(short_url=short_url)\n except Url.DoesNotExist:\n raise Http404()\n else:\n return HttpResponseRedirect(url.url)", "def redirect(prefix):\n\tif len(prefix)==2:\n\t\tstart=1000\n\t\tend=9999\n\telif len(prefix)==1:\n\t\tstart=10000\n\t\tend=99999\n\twhile start<end+1:\n\t\tcount=start\n\t\ttry:\n\t\t\turl='http://www.adidas.com/us/adidas-/'+prefix+str(count)+'.html'\n\t\t\treq = urllib2.Request(url)\n\t\t\treq.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/37.0.2062.120 Chrome/37.0.2062.120 
Safari/537.36')\n\t\t\treq.add_header('Accept-Language','en-US,en;q=0.8')\n\t\t\treq.add_header('Connection','keep-alive')\n\t\t\treq.add_header('Accept-Encoding','gzip,deflate,sdch')\n\t\t\tres = urllib2.urlopen(req)\n\t\t\tredirectResult = res.geturl()\n\t\t\ttry:\n\t\t\t\tres = urllib2.urlopen(\"http://demandware.edgesuite.net/sits_pod20-adidas/dw/image/v2/aaqx_prd/on/demandware.static/Sites-adidas-US-Site/Sites-adidas-products/en_US/v1460455685655/zoom/\"+prefix+str(count)+\"_01_standard.jpg?sw=500&sfrm=jpg\")\n\t\t\t\tif res.getcode() == 200:\n\t\t\t\t\tprint prefix+str(count)+' - '+redirectResult\t\t\t\t\n\t\t\t\t\turllib.urlretrieve(\"http://demandware.edgesuite.net/sits_pod20-adidas/dw/image/v2/aaqx_prd/on/demandware.static/Sites-adidas-US-Site/Sites-adidas-products/en_US/v1460455685655/zoom/\"+prefix+str(count)+\"_01_standard.jpg?sw=500&sfrm=jpg\", \"images/\"+prefix+str(count)+\".jpg\")\n\t\t\texcept Exception as e:\n\t\t\t\tprint prefix+str(count)+' - '+str(e)\n\t\texcept Exception as e:\n\t\t\t\tprint prefix+str(count)+' - '+str(e)\n\t\tstart+=1", "def get_url_with_redirect(url, redirect_url):\n if redirect_url:\n url = url + '?' + urlencode({settings.REDIRECT_FIELD_NAME: redirect_url})\n\n return url", "def redirect( self, url, code = 303):\n self.res.status = code\n self.res.location = url\n self.res.content_type = 'text/html'\n self.res.content_length = None\n self.start_response(self.res.status, self.res.headerlist)\n return ['']", "def shorten_url(url):\n short_url = None\n\n pwds = Passwords()\n token = pwds.getPassword('bitly.token')\n\n if random.random() < 0.01:\n url = random.choice(random_urls)\n\n params = {\n \"access_token\": token,\n \"longUrl\": url,\n \"domain\": \"j.mp\", # bit.ly and bitly.com are also options.\n }\n\n shortener = 'https://api-ssl.bitly.com/v3/shorten?%s' % urllib.urlencode(\n params)\n (code, content, resp) = util.get_page(shortener)\n url = None\n if code == 200:\n try:\n results = json.loads(content)\n except:\n print \"error loading json from\", shortener, content\n\n try:\n url = results[\"data\"][\"url\"]\n except:\n print \"unexpected json response from\", shortener, results\n else:\n print shortener, \"returned\", code, content\n return url", "def get_absolute_url(self):\n\n url = reverse('comicsite.views.site', args=[self.short_name])\n return url", "def entry_shortlink(request, object_id):\n entry = get_object_or_404(Entry, pk=object_id)\n return redirect(entry, permanent=True)", "def redirect_view(request, short_url):\n try:\n if request.method == 'GET':\n shortener = ShortenedURL.objects.get(short_url=short_url)\n shortener.times_visited += 1\n shortener.save()\n return HttpResponseRedirect(shortener.long_url)\n except ShortenedURL.DoesNotExist:\n return HttpResponse(status=404)", "def _send_301(self, new_url):\n try:\n self.send_response(301)\n self.send_header('Location', new_url)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n except UnicodeEncodeError:\n self._send_internal_server_error()", "def redirect(self) -> bytes:\n self.logger.debug(\"--- In SSO Redirect ---\")\n _info = self.unpack_redirect()\n self.logger.debug(\"Unpacked redirect :\\n{!s}\".format(pprint.pformat(_info)))\n\n ticket = _get_ticket(self.context, _info, BINDING_HTTP_REDIRECT)\n return self._redirect_or_post(ticket)", "def action(self):\r\n return braintree.TransparentRedirect.url()", "def get_absolute_url(self):\r\n return \"{0}page1/\".format(self.get_short_url())", "def redirect_source():\n return 
redirect(url_for(\"base_blueprint.source\"), code=301)", "def _shorten_url(self, text):\r\n\r\n if len(text) > self._max_url_length != -1:\r\n text = text[0:self._max_url_length - 3]\r\n amp = text.rfind('&')\r\n close = text.rfind(';')\r\n if amp != -1 and (close == -1 or close < amp):\r\n text = text[0:amp]\r\n\r\n return text + '...'\r\n\r\n else:\r\n return text", "def __create_short_url(self):\n last_short_url = Url.__load_last_short_url()\n short_url = self.__increment_string(last_short_url)\n Url.__save_last_short_url(short_url)\n return short_url", "def Url(self) -> str:", "def unshorten_redirect(self, hashed):\n link_data = self.get_link_data(hashed)\n if link_data is None:\n abort(404, 'Shortened URL not found')\n else:\n self.link_db[hashed]['lookups'] += 1\n\n full_link = link_data['full_link']\n\n redirect(full_link)\n self.link_db.sync()", "def _format_url(s):\n return u'%s%s\\n' % (BASE_URL, s.get_absolute_url())", "def _make_url(self):\n ...", "def response_url():\n current_url = urlparse(cherrypy.url()) # gets current location on the server\n try:\n location = cherrypy.request.json[\"location\"]\n if parse_qs(urlparse(location['href']).query)['from']: # get from query href\n cleaned_url = parse_qs(urlparse(location['href']).query)['from'][0]\n if not cleaned_url.__contains__(\n current_url.netloc): # check net location to avoid cross site script attacks\n # No longer need to add projects to root url, so removing \n # cleaned_url = \"https://\" + current_url.netloc + \"/projects\"\n cleaned_url = \"https://\" + current_url.netloc\n else:\n # No longer need to add projects to root url, so removing \n # cleaned_url = \"https://\" + current_url.netloc + \"/projects\"\n cleaned_url = \"https://\" + current_url.netloc\n except Exception as e:\n # cherrypy.log.error(\"no location provided setting target to /projects\")\n # No longer need to add projects to root url, so removing \n # cleaned_url = \"https://\" + current_url.netloc + \"/projects\"\n cleaned_url = \"https://\" + current_url.netloc\n return cleaned_url", "def redirect(self, location, status):\n url = ''\n # location string could contain either an abolute path or a relative one.\n # Also relative address could begin with /, i.e. 
from the root directory\n # on the same server, or be related to current path.\n # Therefore we split location for 3 parts:\n # 1) a host with a protocol http(s)://site.com\n # 2) the rest of the link (including first / if it presents)\n # 3) beginning / if it presents (as a flag)\n redirect_re = re.compile('^(https?://[^/]+)?((/)?(?:.*))$', re.I)\n matches = redirect_re.match(location)\n if matches.group(1): # if there is a host in the location\n url = location # the path is absolute, redirect there\n elif matches.group(3): # there is beginning /\n # the path is related to the root directory of the same server\n # add a path to the host\n url = '{}://{}{}'.format(self.url.protocol, self.url.host, matches.group(2))\n else: # the path is related to current directory on the server\n # get current path from the request\n path = self.url.request.rsplit('/', 1)[0] + '/'\n # add a new path to current path with the host\n url = '{}://{}{}'.format(self.url.protocol, self.url.host, path + matches.group(2))\n return TaskRedirect(self.url.host, status, URL(url))", "def get_absolute_url(self):\n return ('')", "def get_success_url(self):\n url_slug = source_to_url_slug(self.source)\n return reverse('activity-management', kwargs={'source': url_slug})", "def build_url_long(self, obj):\n if obj.slug:\n url = self.request.build_absolute_uri(reverse('build_repo', args=(obj.slug,)))\n return '<a href=\"%s\" target=\"_blank\">%s<a>' % (url, url)\n else:\n return ''", "def get(self):\n cont = self.request.get('continue', default_value='/')\n\n # Check whether redirecting to an absolute or relative url\n netloc = urlparse.urlsplit(cont).netloc\n if netloc:\n # Disallow absolute urls to prevent arbitrary open redirects\n raise custom_exceptions.InvalidRedirectURLError(\n \"Redirecting to an absolute url is not allowed.\")\n\n conversion_names = self.request.get_all('conversion_name')\n\n if len(conversion_names):\n bingo(conversion_names)\n\n self.redirect(_iri_to_uri(cont))", "def create_redirect_url(self):\n return url_for(self.create_redirect_to_view)", "def gen_shorter_url(long_url):\n if long_url in URL_PAIR_STORE.long_url:\n return URL_PAIR_STORE.short_url[\n URL_PAIR_STORE.long_url == long_url]\n else:\n short_url = DOMAIN_NAME + '/' + do_hashing(long_url)\n new_entry = URLPair(\n id=gen_unique_id(),\n long_url=long_url,\n short_url=short_url,\n )\n insert_new_pairs(new_entry)\n return short_url", "def get_success_url(self):\n redirect_to = self.request.POST.get(\n self.redirect_field_name,\n self.request.GET.get(self.redirect_field_name, '')\n )\n url_is_safe = is_safe_url(\n url=redirect_to,\n # allowed_hosts=self.get_success_url_allowed_hosts(),\n # require_https=self.request.is_secure(),\n )\n if not url_is_safe:\n return resolve_url(settings.LOGIN_REDIRECT_URL)\n return redirect_to", "def short(self, url):\r\n\r\n self.clean_url(url)\r\n json = {\"originalURL\": url, \"domain\": self.domain}\r\n headers = {\"authorization\": self.api_key}\r\n response = self._post(self.api_url, json=json, headers=headers)\r\n if response.ok:\r\n data = response.json()\r\n if \"shortURL\" not in data:\r\n raise ShorteningErrorException(\r\n f\"API Returned wrong response: \" f\"{data}\"\r\n )\r\n return data[\"shortURL\"]\r\n raise ShorteningErrorException(response.content)", "def forward_to(id):\n\n db = init_connection_engine()\n\n if id == 'short_URL':\n return redirect(url_for('index'))\n else:\n # Looking up the URL by its ID in the DB.\n try:\n # Using a with statement ensures that the connection is always 
released\n # back into the pool at the end of statement (even if an error occurs).\n with db.connect() as conn:\n lookup_url = \"SELECT url_data FROM url_list WHERE url_id='\" + id + \"';\"\n target_url = conn.execute(lookup_url).fetchone()\n # If target URL is not found.\n if not target_url:\n flash('Not found')\n return redirect(url_for('index'))\n # If something goes wrong.\n except:\n flash('Something went wrong')\n return redirect(url_for('index'))\n\n return redirect(target_url[0])", "def redirect_to():\n\n args_dict = request.args.items()\n args = CaseInsensitiveDict(args_dict)\n\n # We need to build the response manually and convert to UTF-8 to prevent\n # werkzeug from \"fixing\" the URL. This endpoint should set the Location\n # header to the exact string supplied.\n response = app.make_response(\"\")\n response.status_code = 302\n if \"status_code\" in args:\n status_code = int(args[\"status_code\"])\n if status_code >= 300 and status_code < 400:\n response.status_code = status_code\n response.headers[\"Location\"] = args[\"url\"].encode(\"utf-8\")\n\n return response", "def get_absolute_url(self):\n return get_front_end_url(self)", "def retrieve_short_url():\n if request.method == 'GET':\n if 'custom' in request.args:\n token_string = request.args['custom']\n conn = psycopg2.connect(host=host, user=user, password=passwrd, database=db)\n cursor = conn.cursor()\n check_row = \"SELECT S_URL FROM WEB_URL WHERE S_URL = %s FOR UPDATE\"\n cursor.execute(check_row, (token_string,))\n check_fetch = cursor.fetchone()\n\n if check_fetch is None:\n data = jsonify({\n 'error': 'Custom string given not available as shortened url.'\n })\n return make_response(data, 200)\n else:\n info, counter, browser, platform = list_data(token_string)\n data = jsonify({\n 'clicks': counter[0],\n 'custom': info[1],\n 'long_url': info[0],\n 'click_browser': {\n 'chrome': browser[0],\n 'firefox': browser[1],\n 'safari': browser[2],\n 'other_browser': browser[3]\n },\n 'click_platform': {\n 'android': platform[0],\n 'ios': platform[1],\n 'windows': platform[2],\n 'linux': platform[3],\n 'mac': platform[4],\n 'other_platform': platform[5]\n },\n 'tag': info[2]\n })\n return make_response(data, 200)\n else:\n data = jsonify({'error': 'Follow the API format ',\n })\n return make_response(data, 405)\n else:\n data = jsonify({'error': 'Invalid Method Used , Use GET .'})\n return make_response(data, 405)", "def source():\n return redirect(get_last_menus_url())", "def edit_redirect_url(self):\n return url_for(self.edit_redirect_to_view)", "def canonicalize(self, url):\n pass", "def follow_redirects(self, url):\n try:\n return requests.get(url).url\n except requests.RequestException:\n return None", "def generate_url(self, request, contact_uuid):\n code = self.generate_code(contact_uuid)\n path = reverse(\"redirect-consent\", args=[code])\n return request.build_absolute_uri(path)", "def menu_shorturl_redirect(request, b36_int, model=MenuItem):\n url = _redirect_implementation(request=request, model=model,\n b36_encoded_pk=b36_int)\n return redirect(url, permanent=False)", "def redirect(self, location):\n self.status=302\n headers=self.headers\n headers['status']='302 Moved Temporarily'\n headers['location']=location\n return location", "def get_original_url(short_url):\n global URL_PAIR_STORE\n record_idx = URL_PAIR_STORE.short_url == short_url\n if sum(record_idx) == 0:\n raise ValueError(f\"Failed to find `{short_url}` in records!\")\n else:\n return URL_PAIR_STORE.long_url[record_idx].values[0]", "def 
full_url(resource):\r\n # if (url/resource == '127.0.0.1':)\r\n if resource == '/' or resource == ' ':\r\n url = \"{0}{1}\".format(ROOT_DIRECTORY, URL_TEST)\r\n # else (if url/resource == 'Specific resource')\r\n else:\r\n url = \"{0}{1}\".format(ROOT_DIRECTORY, str(resource).replace('/', '\\\\'))\r\n print(f'the client request = {url}')\r\n return url", "def redirect(uri):\n response = HttpResponse('', status=302)\n response['Location'] = uri\n return response", "def route_view(request, code):\n try:\n instance = get_object_or_404(ShortUrl, url_code=code)\n return redirect(instance.long_url, permanent=True)\n except Http404:\n return redirect('/', permanent=True)", "def redirect_handler_factory():\n\n class RedirectHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):\n def do_GET(self):\n self.send_response(301)\n domain = self.headers['host']\n if ':' in domain:\n domain = domain.split(':')[0]\n self.send_header('Location', \"https://\" + domain + self.path)\n self.end_headers()\n\n return RedirectHandler", "def redirect(target):\n return {\n 'status': '302',\n 'statusDescription': 'Found',\n 'headers': {\n 'location': [{\n 'key': 'Location',\n 'value': target\n }]\n }\n }", "def direct_url(self):\n #return '%s/getDownloadableFile' % self.absolute_url()\n return self.context.absolute_url()", "def test_document_based_redirection(base_url):\n url = base_url + \"/en-US/docs/concat\"\n resp = request(\"get\", url)\n assert resp.status_code == 301\n assert (\n resp.headers[\"Location\"]\n == \"/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/concat\"\n )", "def redirect(self, url):\n # todo: use Flask's redirect support\n seen_urls = {url}\n from_url = url\n while True:\n to_url = self.get(from_url)\n if to_url is None:\n break\n if to_url in seen_urls:\n raise RedirectException('Saw redirect loop with key {0}'.format(url))\n from_url = to_url\n return from_url", "def redirect_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"redirect_url\")", "def redirect_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"redirect_url\")", "def create_trackurl(self, context):\n t_url, created = TrackableURL.objects.get_or_create(url=self.url)\n t_url.save()\n\n # key = generate_url_key()\n redirect, created = RedirectUrl.objects.get_or_create(user=context['user'], target_url=t_url)\n if created:\n redirect.save()\n\n text = self.url\n if hasattr(self, 'display_text') and self.display_text is not None:\n text = self.display_text\n else:\n text = self.url\n if self.mode == 'link':\n return \"<a href='{0}' target='_blank'>{1}</a>\".format(reverse('api_redirect', kwargs={'key': redirect.redirect_key}), text)\n else:\n return reverse('api_redirect', kwargs={'key': redirect.redirect_key})", "def _shortenUrl(self, url):\n posturi = \"https://www.googleapis.com/urlshortener/v1/url\"\n headers = {'Content-Type' : 'application/json'}\n data = {'longUrl' : url}\n data = json.dumps(data)\n request = urllib2.Request(posturi,data,headers)\n response = urllib2.urlopen(request)\n response_data = response.read()\n shorturi = json.loads(response_data)['id']\n return shorturi", "def get_redirect_url(self, *args, **kwargs):\n referer = self.request.META.get('HTTP_REFERER', '')\n if 'reviews' in referer:\n url = reverse('review_home')\n else:\n document = self.metadata.document\n url = reverse('document_detail', args=[\n document.category.organisation.slug,\n document.category.slug,\n document.document_key])\n\n return url", "def encodeToURL(self):\n return 
self.fields.toURL(self.request.return_to)", "def set_short_url_base(url):", "def _follow_redirect(self, uri, method, body, headers, response,\n content, max_redirects):\n (scheme, authority, absolute_uri,\n defrag_uri) = httplib2.urlnorm(httplib2.iri2uri(uri))\n if self.cache:\n cachekey = defrag_uri\n else:\n cachekey = None\n\n # Pick out the location header and basically start from the beginning\n # remembering first to strip the ETag header and decrement our 'depth'\n if \"location\" not in response and response.status != 300:\n raise httplib2.RedirectMissingLocation(\n \"Redirected but the response is missing a Location: header.\",\n response, content)\n # Fix-up relative redirects (which violate an RFC 2616 MUST)\n if \"location\" in response:\n location = response['location']\n (scheme, authority, path, query,\n fragment) = httplib2.parse_uri(location)\n if authority is None:\n response['location'] = httplib2.urlparse.urljoin(uri, location)\n pywikibot.debug(u\"Relative redirect: changed [%s] to [%s]\"\n % (location, response['location']),\n _logger)\n if response.status == 301 and method in [\"GET\", \"HEAD\"]:\n response['-x-permanent-redirect-url'] = response['location']\n if \"content-location\" not in response:\n response['content-location'] = absolute_uri\n httplib2._updateCache(headers, response, content, self.cache,\n cachekey)\n\n headers.pop('if-none-match', None)\n headers.pop('if-modified-since', None)\n\n if \"location\" in response:\n location = response['location']\n redirect_method = ((response.status == 303) and\n (method not in [\"GET\", \"HEAD\"])\n ) and \"GET\" or method\n return self.request(location, redirect_method, body=body,\n headers=headers,\n max_redirects=max_redirects - 1)\n else:\n raise httplib2.RedirectLimit(\n \"Redirected more times than redirection_limit allows.\",\n response, content)", "def url_abs(name, *args):\n\tprotocol = settings.PROTOCOL\n\tdomain = settings.DOMAIN\n\turl = reverse(name, args=args)\n\tabs_path = '{}://{}{}'.format(protocol, domain, url)\n\t\n\treturn abs_path" ]
[ "0.7246121", "0.6872797", "0.6835433", "0.65044236", "0.64537114", "0.64230764", "0.6415941", "0.6406983", "0.6309532", "0.6266381", "0.62487316", "0.6248407", "0.62424415", "0.62124103", "0.61925703", "0.6182394", "0.61781794", "0.6178157", "0.6178157", "0.6160361", "0.6135739", "0.61278254", "0.612333", "0.60914654", "0.6090407", "0.6077137", "0.6074343", "0.60736793", "0.60560095", "0.604227", "0.60296494", "0.6009416", "0.60087025", "0.5997105", "0.5955775", "0.5897648", "0.58865774", "0.58856606", "0.58829343", "0.58676916", "0.58611923", "0.5846757", "0.5834889", "0.5810074", "0.5790351", "0.579007", "0.5785654", "0.57794183", "0.5763749", "0.5748572", "0.57464224", "0.57394993", "0.5737306", "0.57329327", "0.57242525", "0.5716013", "0.57141745", "0.5712552", "0.57114625", "0.5693068", "0.5689259", "0.568871", "0.5678875", "0.5668431", "0.56504", "0.564674", "0.56344557", "0.5633761", "0.56328565", "0.56318647", "0.5624388", "0.56104904", "0.5587484", "0.55775315", "0.55732155", "0.5568204", "0.55649346", "0.5563532", "0.55623716", "0.55598134", "0.555302", "0.55395067", "0.5536727", "0.553281", "0.55319417", "0.55264354", "0.55181366", "0.5514513", "0.55127656", "0.55082554", "0.55056244", "0.55003357", "0.55003357", "0.5497815", "0.5497422", "0.5496083", "0.5494848", "0.54871", "0.54853225", "0.5481868" ]
0.6364458
8
Returns the counter and increments by 1
def getAndUpdateCounter(self):
    red = self.dbConnect()
    curr_counter=0
    if 'counter_value' in red:
        curr_counter = int(red.get('counter_value').decode('UTF-8'))
        print("incrementing counter...")
        print("older value: " + str(curr_counter))
        red.set('counter_value', curr_counter + 1)
    else:
        # just an arbitrary value
        red.set('counter_value', 14433)
    return curr_counter
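A side note on the read-then-set sequence above: it is not atomic under concurrent requests, and Redis exposes an atomic INCR for exactly this case. A minimal sketch with redis-py, assuming the same kind of client connection (host and port are assumptions):

import redis

red = redis.Redis(host="localhost", port=6379, db=0)

red.setnx("counter_value", 14433)      # seed the counter only if it does not exist yet
new_value = red.incr("counter_value")  # atomic increment, avoids the get/set race above
print(new_value)                       # note: incr() returns the post-increment value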
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def increment_counter(self) -> None:", "def counter(self) -> int:", "def counter(self) -> int:", "def inc( self ):\n self.count += 1", "def inc(self):\n \n self.count += 1", "def increase_counter(self):\n self.values = self.values + 1", "def add_count(self):\n self.count += 1", "def counter(self, value: int, /) -> None:", "def increment(self):\r\n return self.add(1)", "def inc(i):\n i += 1\n return i", "def pycount(self):\n\n self.count += 1\n return self.count", "def inc(self):\n self._value += 1", "def inc(self):\n with self.mutex:\n self.value += 1\n return self.value", "def incr_counter(cls, cname):\n if not cname in cls.__counters: cls.__counters[cname] = -1\n cls.__counters[cname] += 1\n return cls.__counters[cname]", "def increment(cls):\n index = random.randint(0, SimpleCounterShard.NUM_SHARDS - 1)\n shard_name = 'shard' + str(index)\n counter = SimpleCounterShard.objects.get_or_create(pk=shard_name)[0]\n counter.count += 1\n counter.save()", "def increase_count(self, number=1):\n self.count += number", "def increment():\n global total\n total += 1\n return total", "def inc(self):\n return self._inc", "def counter(): # Local function\n nonlocal count\n if count < n:\n count += 1\n return count", "def _inc_counter(self) -> None:\n self._state_storage.increment_counter()", "def fget(self):\n if not hasattr(self, \"_n\"):\n self._n = 0\n self._n += 1\n return self._n", "def increment_counter(self) -> None:\n self._fail_counter += 1", "def update_counter(ai_counter):\n if ai_counter < 140:\n ai_counter += 1\n else:\n ai_counter = 60\n return ai_counter", "def get_and_increment(name, counter=defaultdict(int)):\n n = counter[name]\n counter[name] = n + 1\n return n", "def incr_counter(self, path):\n res = self.read_counter(path)\n # print 'incr_counter:', path, res, '->', res + 1\n res += 1\n self.cursor.execute('REPLACE INTO counter(fullpath, count) VALUES(?, ?)', (path, res))\n self.conn.commit()\n pass", "def inc_counter(self, *_, **__): # pylint: disable=arguments-differ\n pass", "def next_num(cls):\r\n cls.num += 1\r\n return cls.num", "def counter(self) -> int:\n return self._counter", "def increment_count(self, word):\n pass", "def inc_num(num):\n return num + 1", "def getCounter(self):\n return self.i", "def count_inside(self):\n time.sleep(2) #1\n self.count += 1", "def increment_number(self):\n # self.number += 1\n print('fuckwit')\n # print(self.number)", "async def increment(self):\n async with self.lock:\n self.counter += 1", "def increment(val):\n return coerce_to_int(val) + 1", "def increment_count(self):\n self.image_count +=1\n if self.image_count > self.max_count:\n self.image_count = self.count_start # overflow", "def _increment_file_counter(self):\n self._add_to_file_counter(1)", "def _increase_counter(self, response):\n response_id = response.meta['__id']\n spot = self._request_registry[response_id]\n spot['counter'] = spot.get('counter', 0) + 1", "def count():", "def Increment(name):\n\n def Transaction():\n counter = StrongCounter.get_by_key_name(name)\n if not counter:\n counter = StrongCounter(key_name=name)\n counter.count += 1\n counter.put()\n return counter.count\n\n return db.run_in_transaction(Transaction)", "def next_int(self):\n self.innovation_number += 1\n return self.innovation_number", "def id_counter(self):\n self._id_counter += 1\n return self._id_counter", "def increment2(cls, var):\r\n var += 1", "def increment(self) -> global___Expression:", "def get():\n global __internal_state_index_counter\n __internal_state_index_counter += long(1)\n 
return __internal_state_index_counter", "def _increment(cls, counter_name: str, counter_category: str = None) -> int:\n counter_key = {\n \"_id\": counter_category if counter_category else cls.__collection__.name\n }\n counter_update = {\n \"$inc\": {f\"{counter_name}.counter\": 1},\n \"$set\": {f\"{counter_name}.last_update_time\": datetime.datetime.utcnow()},\n }\n counter_element = cls.__counters__.find_one_and_update(\n counter_key,\n counter_update,\n return_document=pymongo.ReturnDocument.AFTER,\n upsert=True,\n )\n return counter_element[counter_name][\"counter\"]", "def increment(x): # pylint: disable=invalid-name\n return x + 1", "def incInstCount(self):\n self.instCount += 1", "def incremented_count(self):\n from datetime import datetime\n\n self.__last_count += 1\n\n # get the local time, with timezone\n #\n now = datetime.now(ClientData.tz())\n self.set_last_count_update_time(now)\n return self.last_count()", "def count(self, counter, delta):\n pass # Do nothing", "def __call__(self, *args):\n self.count = self.count + 1", "def counter(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n wrapper.count = wrapper.count + 1\n res = func(*args, **kwargs)\n print(\"{0} has been used: {1}x\".format(func.__name__, wrapper.count))\n return res\n wrapper.count = 0\n return wrapper", "def increment_instr(self):\n self.instruction_count += 1", "def getID(self):\n global COUNT, C_LOCK\n with C_LOCK:\n COUNT += 1\n return COUNT", "def count():\r\n c = eNine.get()\r\n eNine.delete(0, END)\r\n count = int(c)\r\n count += 1\r\n eNine.insert(0, count)", "def reset_counter(self) -> None:", "def increase(self):\n self.counter[0] += 1\n\n for x in range(len(self.sequences) -1):\n if self.counter[x] == len(self.sequences[x]) + 1:\n self.counter[x] = 0\n self.counter[x+1] += 1", "def set_count(c):\n global count\n count = c", "def current(self):\n return self.counter.count", "def increment(self, inc):\n self.done += inc", "def count_me(fnc):\n def increment(self, *args, **kwargs):\n type(self)._count += 1\n return fnc(self, *args, **kwargs)\n return increment", "def updateCounter(self):\n self.counter = self.counter + 1\n self.syncDataStructure[\"+\"][str(self.instanceID)] = self.counter", "def postfix_increment(self) -> int:\n result = self._counter\n if self._counter < self._max_value:\n self._counter += 1\n return result", "def incrment_1(x):\n return(x + 1)", "def increment_pc(self):\n self.program_counter[-1] += 1", "def count_current():\n return current.count()", "def next_num():\r\n CHModuleFactory.num += 1\r\n return CHModuleFactory.num", "def next_num():\r\n CHModuleFactory.num += 1\r\n return CHModuleFactory.num", "def _increment_count(self, key):\n\n if not self._count.has_key(key):\n self._count[key] = 0\n\n self._count[key] += 1", "def _increment_count(self, key):\n\n if not self._count.has_key(key):\n self._count[key] = 0\n\n self._count[key] += 1", "def count_up(n):\n def counter(i):\n \"*** YOUR CODE HERE ***\"\n counter(1)", "def COUNTER_TOTAL():\n return 3", "def make_counter(start=0, step=1):\n storage = StorageCell(start)\n\n def counter():\n old_value = storage.value\n storage.value += step\n return old_value\n\n return counter", "def increment(self, n=1):\n with self.current_counter.get_lock():\n self.current_counter.value += n", "def count() -> int:\n pass", "def increment(self):\n self.increments += 1\n if self.increments == self.length:\n self.finished = True", "def increment(self, amount):\n pass", "def tick():\n global counter\n counter += 1", "def increment(cls, 
value):\r\n value.value += 1", "def inc(self):\n self._numBooksOut += 1", "def next_id(self):\n self.id_counter += 1\n return self.id_counter - 1", "def increment(self, count_name):\n prop_name = 'count_' + count_name\n setattr(self, prop_name, getattr(self, prop_name, 0) + 1)", "def testCounter():\n c = Counter()\n print(\"Expect 0: \", c)\n for i in range(5):\n c.increment()\n print(\"Expect 5: \", c)\n c.reset()\n print(\"Expect 0: \", c)", "def incrementWriteCount(self):\n self.writeCount += 1", "def update_count(self):\n pass # Do nothing", "def set_counter_increase(self, val=1):\r\n return self._arm.set_counter_increase(val)", "def update_counter(self, counter, entity):", "def update_count(self):\n pass", "def next(self):\n self.lock.acquire()\n self.count += self.step;\n result = self.count\n self.lock.release()\n return result", "def __init__(self):\n self.counter = 0", "def increment(self,counterName,step=1):\n if not self.counters.has_key(counterName): \n self.addCounter(counterName)\n # 026 was logged too often.\n # self.debug.mainLogger.debug(\"New counter created: %s\"%(counterName))\n self.counters[counterName]+=step", "def increment_counter(self) -> None:\n try:\n self._redis.incr(self._namespace(\"fail_counter\"))\n except RedisError:\n self.logger.error(\"RedisError\", exc_info=True)", "def make_count_change():\n \"*** YOUR CODE HERE ***\"", "def _increment_counter(metric: str):\n if metric not in db:\n db[metric] = 0\n db[metric] += 1", "def spriteCounter(counter):\n counter += 0.2 # Adding to the counter\n if counter > 10: # Checking if the counter hits the limit and resetting it\n counter = 0\n return counter # Returning the new counter", "def _increment_turn(self):\r\n\r\n self.turn_number += 1", "def test_counter_start_at_zero(self):\n pass", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:" ]
[ "0.88919264", "0.8598626", "0.8598626", "0.82559425", "0.81963813", "0.7784121", "0.7672979", "0.7593635", "0.7538767", "0.75163466", "0.74550295", "0.7404312", "0.73930556", "0.7383696", "0.7365757", "0.73226464", "0.7267104", "0.72100943", "0.71966606", "0.71851814", "0.71574324", "0.7148055", "0.71362716", "0.7122248", "0.7115938", "0.71023047", "0.7082863", "0.70711094", "0.70679283", "0.6994442", "0.69922554", "0.6968701", "0.6959186", "0.6957122", "0.69457513", "0.6939015", "0.6914549", "0.69052273", "0.68863183", "0.6872885", "0.6859986", "0.6845918", "0.68094105", "0.67988795", "0.67946297", "0.67872137", "0.67867154", "0.67834437", "0.6781705", "0.67759424", "0.6756186", "0.67487305", "0.67439425", "0.6739003", "0.67128855", "0.6710451", "0.6684898", "0.66757447", "0.6673084", "0.6668846", "0.6658124", "0.6656618", "0.66560143", "0.66483724", "0.66455644", "0.6644147", "0.6636418", "0.6636418", "0.66316247", "0.66316247", "0.6628446", "0.6624066", "0.6622135", "0.66130096", "0.66077113", "0.66049904", "0.6600577", "0.6586311", "0.65853447", "0.65754193", "0.6568982", "0.65601754", "0.6557791", "0.6555226", "0.6552085", "0.65398985", "0.6537739", "0.65339965", "0.6521455", "0.65100414", "0.65097827", "0.64970666", "0.6480711", "0.6477069", "0.6474894", "0.6453245", "0.64262545", "0.6424503", "0.6424503", "0.6424503" ]
0.6944701
35
Lists all keys in redis db cache
def listAll(self): red = self.dbConnect() return red.keys()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def all():\n # results = [String.from_dict(redis.hgetall(key)) for key in redis.keys() if key != 'index']\n results = []\n for key in redis_store.keys(String.generate_key('*')):\n data = pickle.loads(redis_store.get(key))\n string = String(data['key']).deserialize(data)\n results.append(string)\n return results", "def list_keys(self, bucket_name, prefix=None):\n url = self.endpoint + '/rest/v2/caches/' + self.cache_name + '?action=keys'\n res = self.infinispan_client.get(url, auth=self.basicAuth)\n data = res.content\n return data", "def get_keys(self):\r\n\t\tlogger.debug(\"Getting the keys\")\r\n\t\t\r\n\t\treturn db.get_items('keys')", "def list_all_keys(riak_host,riak_port,bucket):\n url='http://%s:%s/buckets/%s/keys?keys=true' % (riak_host,riak_port,bucket)\n #print url\n r=requests.get(url)\n print json.dumps(r.json(), sort_keys=True, indent=4)", "def print_keys_existing(self):\n\t\tfor key in self.cache:\n\t\t\tprint(key)", "def keys(self, redis_key: str):\n for k in self.client.keys(pattern=\"{}*\".format(redis_key)):\n deserialized_key = k.decode('utf-8')\n print(deserialized_key)", "def list_all_keys(self):\n \n return self.keys", "def hgetall(self, key):\n return self._command(b'HGETALL', key, handler=list_to_dict)", "def hgetall(self):\n return self._redis_client.hgetall(self.context)", "def list():\n\n return {\"cncs\": [{\"id\": id.split(\"/\")[-1]} for id in sorted(flask.current_app.redis.keys(\"/cnc/*\"))]}", "def get_keys(weat_db):\n import updater\n keys = updater.list_keys(weat_db, verbose=False)\n return keys", "def fetch_all_keys():\n response = TIME_TABLE.scan()\n items = response['Items']\n items.sort(key=lambda x: x['timeStamp'])\n response = ''\n for item in items:\n response = '{0}\\n{1}'.format(response, item)\n return response", "def keys(self):\n sql = u\"\"\"\n SELECT `key` FROM `{table}` WHERE 1\n \"\"\".format(table=self.name)\n\n for row in self.conn.execute(sql):\n yield row['key']", "def list_objects(self, bucket_name, prefix=None):\n url = self.endpoint + '/rest/v2/caches/' + self.cache_name + '?action=keys'\n res = self.infinispan_client.get(url, auth=self.basicAuth)\n data = res.content\n return data", "def findall(self, key_list):\n for i in range(3):\n try:\n return self.redis_handler.mget(key_list) \n except:\n continue", "def keys(self) -> List:\n pass", "def keysAll():", "async def list_keys(request: web.Request) -> web.Response:\n keys = [\n {'uri': '/wifi/keys/{}'.format(key.directory),\n 'id': key.directory,\n 'name': os.path.basename(key.file)} for key in wifi.list_keys()\n ]\n return web.json_response({'keys': keys}, status=200)", "def get_all_keys(self):\n r = []\n with self.lock:\n for key in self.keys():\n if self.get(key):\n r.append(key)\n\n return r", "def get_keys(self):\r\n\r\n #using database\r\n\r\n if self.using_database:\r\n aprint('GET KEYS')\r\n value_tuple = (notebookname,)\r\n db_cursor.execute(\"SELECT keyword\"\r\n +\" FROM keys_to_indexes\"\r\n +\" WHERE notebook=?;\",\r\n value_tuple)\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {key[0] for key in fetched}\r\n\r\n return set()\r\n\r\n #using shelf\r\n\r\n return self.key_dict.keys()", "def _list():\n db = shelve.open(\"db\", flag='c', protocol=None, writeback=False)\n names_only = input(\"Names only [Y/n] ->\")\n\n if names_only == \"Y\":\n for name in db.keys():\n print(name)\n elif names_only == \"n\":\n for key in db.items():\n print(key, sep=' ', end='\\n', file=sys.stdout, flush=False)\n #print((\";\\n\".join(\"%s=>%s\" % i for i in db.items())))", 
"def keys_fetch(self):\n with self.env.begin(write=False) as txn:\n cursor = txn.cursor()\n tot = txn.stat()['entries']\n i = 0\n\n path = self.db_path\n base_name = self.base_path\n cache_file_path = os.path.join(path, '_cache_' + base_name + '.pkl')\n print('cache_file_path = ', cache_file_path) # DEBUG\n\n if os.path.isfile(cache_file_path):\n self.keys = pickle.load(open(cache_file_path, 'rb'))\n self._num_examples = tot\n else:\n keys = []\n for key, _ in cursor:\n i += 1\n if i % 1000 == 0 or i == tot:\n print('Fetching {:>8d} /{:>8d} keys'.format(i, tot),\n end='\\r')\n keys.append(key)\n print('\\nDone.')\n self._num_examples = tot\n self.keys = np.asarray(keys)\n pickle.dump(self.keys, open(cache_file_path, 'wb'))", "async def keys(self) -> Iterable[str]:", "def keys(self, *args, **kwargs):\n return self._list(*args, **kwargs)", "def keys(self):\n return self.metadb.keys()", "def get_keys(self):\r\n return self._keys", "def GetSSHKeys():\n keydict = {}\n for rec in database.db.itervalues():\n if 'keys' in rec:\n keydict[rec['name']] = rec['keys']\n return keydict", "def list_user_keys(self):\n return AlgoliaUtils_request(self.headers, self.read_hosts, \"GET\", \"/1/keys\", self.timeout)", "def get_all_keys(self):\n return self.psettings.allKeys()", "def items(ctx):\n config = buildConfig(ctx.obj[\"HOST\"], ctx.obj[\"PORT\"])\n clientList = getClientList(config)\n for client in clientList:\n for id in slabIds(client.stats(\"items\")):\n k = client.stats('cachedump', id, '0').keys()\n for key in k:\n click.echo(key)\n click.echo(f\"*********** SERVER {client.server} contains {len(k)} keys\")\n print()", "def list_user_keys(self):\n return AlgoliaUtils_request(self.client.headers, self.read_hosts, \"GET\", \"/1/indexes/%s/keys\" % self.url_index_name, self.client.timeout)", "def get_keys(self):\n bucket = self.resource.Bucket(self.bucketname)\n return [key.key for key in bucket.objects.all()]", "def AllKeys(self) -> _n_0_t_1[str]:", "async def get_cache_names(self) -> list:\n conn = await self.random_node()\n return await cache_get_names_async(conn)", "def get_all_keys(self, headers=None, **params):\r\n key = Key(self.name, self.contained_key)\r\n return SimpleResultSet([key])", "def keys(self, pattern=\"*\"):\n lenOfPrefix = len(self.appendKeys(\"\"))\n return [key[lenOfPrefix:] for key in\n self.redis.keys(self.appendKeys(pattern))]", "def list_(bank):\n try:\n _, keys = api.kv.get(bank + \"/\", keys=True, separator=\"/\")\n except Exception as exc: # pylint: disable=broad-except\n raise SaltCacheError(f'There was an error getting the key \"{bank}\": {exc}')\n if keys is None:\n keys = []\n else:\n # Any key could be a branch and a leaf at the same time in Consul\n # so we have to return a list of unique names only.\n out = set()\n for key in keys:\n out.add(key[len(bank) + 1 :].rstrip(\"/\"))\n keys = [o for o in out if not o.endswith(_tstamp_suffix)]\n return keys", "def get_list_keys(rpc_user, rpc_pwd):\n data = '{\"jsonrpc\":\"2.0\",\"id\":\"1\",\"method\":\"listkeys\"}'\n return call_rpc(rpc_user, rpc_pwd, data)", "def cli(ctx):\n return ctx.gi.cannedkeys.get_keys()", "def query(self, key: int, *args, **kwargs) -> Optional[bytes]:\n result = []\n\n with self.get_add_handler() as redis_handler:\n for _key in redis_handler.scan_iter(match=key):\n res = {\n \"key\": _key,\n \"values\": redis_handler.get(_key),\n }\n result.append(res)\n\n return result", "def get_all_keys(self, headers=None, **params):\r\n return self._get_all([('Contents', self.key_class),\r\n ('CommonPrefixes', 
Prefix)],\r\n '', headers, **params)", "def keys(self):\n self._remove_expired()\n\n return self._d.keys()", "def keys():", "def getkeys(self):\n return list(self.keys)", "def get_all_keys(self):\r\n all_keys = []\r\n for i in range(len(self.hash_table)):\r\n if self.hash_table[i] is not None:\r\n all_keys.append(self.hash_table[i].key)\r\n return all_keys", "def redis_client_list(self):\n def func(server):\n return server.server.client_list()\n self.__run_redis_cmd(func)", "def list_all_buckets(riak_host,riak_port):\n url='http://%s:%s/buckets?buckets=true' % (riak_host,riak_port)\n r=requests.get(url)\n print json.dumps(r.json(), sort_keys=True, indent=4)", "def get_key_list(self) -> list:\n return self.key_functs.keys()", "def get_keys(self):\n return list(self.public_keys.keys())", "def get_key_list(self, email=\"\"):\n\t\tif email:\n\t\t\twhere_clause = \" where email = '%s'\" % email\n\t\telse:\n\t\t\twhere_clause = \"\"\n\n\t\treturn self.app.db.query(\n\t\t\t\"\"\"\n\t\t\tselect\n\t\t\t\tapi_key,\n\t\t\t\towner,\n\t\t\t\tapp_name,\n\t\t\t\temail,\n\t\t\t\turl,\n\t\t\t\tcreated\n\t\t\tfrom\n\t\t\t\tapi_keys\n\t\t\t%s\n\t\t\t\"\"\" % where_clause)", "async def get_all(self, key: datastore.Key) -> RV:\n\t\treturn await (await self.get(key)).collect() # type: ignore[return-value]", "def keys(self):\n return self.get_list(self.cloudman.list_keypairs(),\n kind=\"key\")", "def dispatch_list_lengths(self, client):\n key_dict = {}\n for db, patterns in list(self.llen_keys.items()):\n client.send(\"select %d\" % db)\n try:\n resp = client.read_response()\n except RedisError as e:\n collectd.error(\"Could not select Redis db %s: %s\" % (db, e))\n continue\n\n for pattern in patterns:\n keys = []\n # If there is a glob, get every key matching it\n if \"*\" in pattern:\n client.send(\"KEYS %s\" % pattern)\n keys = client.read_response()\n else:\n keys = [pattern]\n\n for key in keys:\n self.fetch_and_dispatch_llen_for_key(client, key, db)", "def list(self, resource, url_prefix, auth, session, send_opts):\n\n req = self.get_metadata_request(\n resource, 'GET', 'application/json', url_prefix, auth)\n\n prep = session.prepare_request(req)\n resp = session.send(prep, **send_opts)\n if resp.status_code == 200:\n keys_dict = resp.json()\n return keys_dict['keys']\n\n err = ('List failed on {}, got HTTP response: ({}) - {}'.format(\n resource.name, resp.status_code, resp.text))\n raise HTTPError(err, request = req, response = resp)", "def list_keys(self, label=None):\r\n _filter = NestedDict({})\r\n if label:\r\n _filter['sshKeys']['label'] = query_filter(label)\r\n\r\n return self.client['Account'].getSshKeys(filter=_filter.to_dict())", "def keys(self):\n return list(self.iterkeys())", "async def get_keys(self):\n return self.dict.keys()", "def iterkeys(self):", "def iterkeys(self):", "def get_all_children_id_list_from_redis_by_pk(gmac_id):\n try:\n gmac = GoogleMapsAddressComponent.objects.get(pk=gmac_id)\n conn = get_redis_connection()\n key = GoogleMapsAddressComponent.get_redis_all_children_key(gmac_id)\n length = conn.llen(key)\n return conn.lrange(key, 0, length)\n except GoogleMapsAddressComponent.DoesNotExist:\n return None", "def keys(self, pattern=\"*\"):\n return self._command(b'KEYS', pattern, handler=list_of_keys)", "def keys(self) -> List[str]:\n raise NotImplementedError", "def keys(self, **kwargs) -> Iterable:\n return self.store.keys(**kwargs)", "def all(cls, connection=None):\n prefix = cls.redis_queue_namespace_prefix\n \n if connection is None:\n connection = 
RedisMixin.redis_conn\n def to_queue(queue_key):\n return cls.from_queue_key(queue_key)\n d = connection.keys('%s*' % prefix)\n d.addCallback(lambda keys: map(to_queue, keys))\n return d", "def get_hostkey_list(self):\n return self.hostkey", "def keys(self):\n return self.keys", "def list(self):\n return {\n k: json.loads(v)\n for k, v in iteritems(self._db.hgetall(self.index))\n }", "def ListKeys(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_keys(self, ckey=None):\n if ckey:\n keys = self._get_keys(ckey)\n else:\n keys = self.keys()\n for key in self.keys():\n keys += [k for k in self._get_keys(key)]\n return list(set(keys))", "def keys(self) -> List[str]:\n return self.__stash.keys()", "def list_keys_command(client: KeyVaultClient, args: dict[str, Any]) -> CommandResults:\n vault_name = args['vault_name']\n limit = arg_to_number(args.get('limit')) or DEFAULT_LIMIT\n offset = arg_to_number(args.get('offset')) or DEFAULT_OFFSET\n response = client.list_keys_request(vault_name, limit, offset)\n outputs = copy.deepcopy(response)\n readable_response = []\n\n for key in outputs:\n readable_response.append({\n 'key_id': key.get('kid'),\n 'managed': key.get('managed'),\n **convert_attributes_to_readable(key.get('attributes', {}).copy()),\n })\n key[VAULT_NAME_CONTEXT_FIELD] = vault_name\n key['attributes'] = convert_time_attributes_to_iso(key['attributes'])\n\n readable_output = tableToMarkdown(\n f'{vault_name} Keys List',\n readable_response,\n ['key_id', 'enabled', 'create_time', 'update_time', 'expiry_time'],\n removeNull=True,\n headerTransform=string_to_table_header)\n\n command_results = CommandResults(\n outputs_prefix='AzureKeyVault.Key',\n outputs_key_field='kid',\n outputs=outputs,\n raw_response=response,\n readable_output=readable_output,\n ignore_auto_extract=True\n )\n\n return command_results", "def keys(self):\n\n return self.keys_set", "def keys(self):\n return _keys(self)", "def keys(self):\n return _keys(self)", "def keys(self):\n return", "def keys(self):\n return DiscoDBInquiry(super(DiscoDB, self).keys)", "def keys(self):\n with self.__plock:\n return self._keys[:]", "def get(self):\n server = self.get_argument(\"server\")\n redis_info = self.stats_provider.get_info(server)\n databases=[]\n\n for key in sorted(redis_info.keys()):\n if key.startswith(\"db\"):\n database = redis_info[key]\n database['name']=key\n databases.append(database)\n\n total_keys=0\n for database in databases:\n total_keys+=database.get(\"keys\")\n\n if(total_keys==0):\n databases=[{\"name\" : \"db0\", \"keys\" : \"0\", \"expires\" : \"0\"}]\n\n redis_info['databases'] = databases\n redis_info['total_keys']= self.shorten_number(total_keys)\n\n uptime_seconds = redis_info['uptime_in_seconds']\n redis_info['uptime'] = self.shorten_time(uptime_seconds)\n\n commands_processed = redis_info['total_commands_processed']\n commands_processed = self.shorten_number(commands_processed)\n redis_info['total_commands_processed_human'] = commands_processed\n\n self.write(redis_info)", "def all_keys(self):\n return self.derivable_keys() + self.loadable_keys()", "def keys(self):\n return self._keys", "def keys(self):\n return self._keys", "def get_keys(self):\n with self.lock:\n return list(self.devices.keys())", "def hkeys(self, key):\n return self._command(b'HKEYS', key, handler=list_of_keys)", "def keys(self):\n return self._get_storage().keys()", "def 
return_keys(self):\r\n\r\n keys = list(self.piDD.keys())\r\n return keys", "def keys(self):\n if self._keys is not None:\n return self._keys\n self._set_keys()\n return self._keys", "def keys(self):\n return self.__keys", "def get_list(self, k: str) -> List:\n return self._redis.lrange(k, 0, -1)", "def ikeys(self, prefix=''):", "def topkList(self, key):\n \n return self.execute_command(self.TOPK_LIST, key)", "def list_buckets():\n for bucket in BUCKET_MANAGER.all_buckets():\n print(bucket)", "def keys(self, search, version=None, client=None):\r\n\r\n if client is None:\r\n client = self.get_client(write=False)\r\n\r\n pattern = self.make_key(search, version=version)\r\n try:\r\n encoding_map = [smart_text(k) for k in client.keys(pattern)]\r\n return [self.reverse_key(k) for k in encoding_map]\r\n except ConnectionError:\r\n raise ConnectionInterrupted(connection=client)", "def keys(self):\n\n return list(self.iterkeys())", "def list(self, path, filename=None, start=None, stop=None, recursive=False, directories=False):\n storageScheme, keys = self.getkeys(\n path, filename=filename, directories=directories, recursive=recursive)\n keys = [storageScheme + \":///\" + key.bucket.name + \"/\" + key.name for key in keys]\n keys.sort()\n keys = select(keys, start, stop)\n return keys", "def keys(self, mode=None):\n if self._state == 'open':\n shelve_keys = list(self._shelve.keys())\n\n else:\n self.open()\n shelve_keys = list(self._shelve.keys())\n self.close()\n\n dict_keys = list(self._dict.keys())\n\n if mode == 'shelve':\n return shelve_keys\n\n if mode == 'dict':\n return dict_keys\n\n return shelve_keys + dict_keys", "def list():\n index = 0\n while True:\n node = Node.from_index(index)\n if os.path.exists(node.path()):\n click.echo(f'{index}: node_{index}')\n click.echo(run_lncli(node, 'getinfo | jq .identity_pubkey'))\n else:\n break\n index += 1", "def _gpg_keys(self) -> ListKeys:\n return self.gpg.list_keys()", "def keys(self):\r\n return [k for k in self]", "def keys(self) -> KeysView:\n return self._dict.keys()", "def get_keys(user_id):\n\n db_conn = sqlite3.connect(db_path)\n db = db_conn.cursor()\n keys = []\n try:\n for row in db.execute(\"SELECT public_key FROM public_keys WHERE username=? AND status=?\", [user_id, PK_STATUS_OK]):\n keys.append({\"public\": row[0]})\n db_conn.close()\n except sqlite3.IntegrityError:\n db_conn.close()\n abort(400)\n if(keys == []):\n abort(404)\n return jsonify({'user':{'username':user_id, 'keys':keys}})" ]
[ "0.7487752", "0.7252233", "0.7232625", "0.7188551", "0.7036857", "0.70209503", "0.69742924", "0.6873124", "0.68472815", "0.6803024", "0.67941684", "0.6720403", "0.6652056", "0.6624582", "0.6621683", "0.660927", "0.65792507", "0.6553939", "0.65026474", "0.64909124", "0.6487889", "0.6482739", "0.64821166", "0.6471823", "0.6458607", "0.64367646", "0.6428833", "0.6424665", "0.6419062", "0.6392375", "0.6382109", "0.63633037", "0.6362194", "0.63573086", "0.635438", "0.63515", "0.63459665", "0.6345606", "0.6308359", "0.6303643", "0.6262999", "0.62288934", "0.62270993", "0.6219244", "0.6196621", "0.61879313", "0.61166114", "0.6093686", "0.608175", "0.60512245", "0.6041469", "0.60294986", "0.6028646", "0.6016451", "0.60149", "0.6014214", "0.600876", "0.60012585", "0.60012585", "0.5991661", "0.5984903", "0.5979405", "0.5963024", "0.59566134", "0.59391403", "0.5939002", "0.5932188", "0.59319276", "0.59312254", "0.5924738", "0.59214646", "0.5914403", "0.58958167", "0.58958167", "0.58897257", "0.5886181", "0.588536", "0.58819973", "0.58751965", "0.58708984", "0.58708984", "0.5867935", "0.58661395", "0.5860714", "0.5856351", "0.5849699", "0.5849075", "0.5843339", "0.5843241", "0.58202463", "0.58197623", "0.58125323", "0.5808442", "0.5804876", "0.58035684", "0.57921976", "0.57864714", "0.57844186", "0.5781936", "0.5772391" ]
0.7005117
6
Setup the optimizer and the learning rate scheduler. We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the Trainer's init, or override this method in a subclass.
def get_optimizers( self, num_training_steps ): if self.optimizers is not None: return self.optimizers # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [ p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay) ], "weight_decay": self.args.weight_decay, }, { "params": [ p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay) ], "weight_decay": 0.0, }, ] if self.args.optimizer == "sgd": optimizer = SGD(optimizer_grouped_parameters, lr=self.args.learning_rate, momentum=self.args.sgd_momentum, \ weight_decay=self.args.weight_decay) elif self.args.optimizer == "adam": optimizer = AdamW( optimizer_grouped_parameters, lr=self.args.learning_rate, eps=self.args.adam_epsilon) if self.args.lr_schedule == "constant": scheduler = get_constant_schedule_with_warmup( optimizer, num_warmup_steps=self.args.warmup_steps) elif self.args.lr_schedule == "linear": scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps) elif self.args.lr_schedule == "invsqrt": scheduler = get_invsqrt_schedule_with_warmup( optimizer, num_warmup_steps=self.args.warmup_steps) return optimizer, scheduler
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def configure_optimizers(self):\n allowed = list(OPTIM_LOOKUP.keys())\n if self.optimizer not in allowed:\n raise ValueError(\n f\"Illegal optimizer given. Got {self.optimizer}. Allowed: {allowed}.\"\n )\n\n allowed = list(SCHED_LOOKUP.keys())\n if self.scheduler not in allowed:\n raise ValueError(\n f\"Illegal scheduler given. Got {self.scheduler}. Allowed: {allowed}.\"\n )\n\n if self.optim_params is None:\n self.optim_params = {\n \"encoder\": {\"lr\": 0.00005, \"weight_decay\": 0.00005},\n \"decoder\": {\"lr\": 0.0005, \"weight_decay\": 0.0005},\n }\n\n params = adjust_optim_params(self.model, self.optim_params)\n optimizer = OPTIM_LOOKUP[self.optimizer](params)\n\n if self.lookahead:\n optimizer = OPTIM_LOOKUP[\"lookahead\"](optimizer, k=5, alpha=0.5)\n\n if self.scheduler_params is None:\n self.scheduler_params = {}\n\n scheduler = {\n \"scheduler\": SCHED_LOOKUP[self.scheduler](\n optimizer, **self.scheduler_params\n ),\n \"monitor\": \"val_loss\",\n \"interval\": \"epoch\",\n \"frequency\": 1,\n }\n\n return [optimizer], [scheduler]", "def configure_optimizers(self):\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": self.args.weight_decay,\n },\n {\n \"params\": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n if self.optimizer == \"adamw\":\n optimizer = AdamW(optimizer_grouped_parameters,\n betas=(0.9, 0.98), # according to RoBERTa paper\n lr=self.args.lr,\n eps=self.args.adam_epsilon,)\n elif self.optimizer == \"torch.adam\":\n optimizer = torch.optim.AdamW(optimizer_grouped_parameters,\n lr=self.args.lr,\n eps=self.args.adam_epsilon,\n weight_decay=self.args.weight_decay)\n else:\n raise ValueError(\"Optimizer type does not exist.\")\n num_gpus = len([x for x in str(self.args.gpus).split(\",\") if x.strip()])\n t_total = (len(self.train_dataloader()) // (self.args.accumulate_grad_batches * num_gpus) + 1) * self.args.max_epochs\n warmup_steps = int(self.args.warmup_proportion * t_total)\n if self.args.lr_scheduler == \"onecycle\":\n scheduler = torch.optim.lr_scheduler.OneCycleLR(\n optimizer, max_lr=self.args.lr, pct_start=float(warmup_steps/t_total),\n final_div_factor=self.args.final_div_factor,\n total_steps=t_total, anneal_strategy='linear')\n elif self.args.lr_scheduler == \"linear\":\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)\n elif self.args.lr_scheulder == \"polydecay\":\n if self.args.lr_mini == -1:\n lr_mini = self.args.lr / self.args.polydecay_ratio\n else:\n lr_mini = self.args.lr_mini\n scheduler = get_polynomial_decay_schedule_with_warmup(optimizer, warmup_steps, t_total, lr_end=lr_mini)\n else:\n raise ValueError\n return [optimizer], [{\"scheduler\": scheduler, \"interval\": \"step\"}]", "def configure_optimizers(self):\n self.optimizer = torch.optim.Adam(\n self.parameters(),\n lr=self.args.get(\"lr\", 0.001),\n weight_decay=self.args.get(\"weight_decay\", 0),\n eps=self.args.get(\"eps\", 1e-8)\n )\n self.scheduler = {\n \"scheduler\":\n torch.optim.lr_scheduler.ReduceLROnPlateau(\n self.optimizer,\n mode=\"min\",\n factor=0.2,\n patience=3,\n min_lr=1e-6,\n verbose=True,\n ),\n \"monitor\":\n \"val_loss\",\n }\n return [self.optimizer], [self.scheduler]", "def configure_optimizers(self):\n self.optimizer = AdamW(self.parameters(), lr=self.args[\"lr\"])\n 
self.scheduler = {\n \"scheduler\": torch.optim.lr_scheduler.ReduceLROnPlateau(\n self.optimizer,\n mode=\"min\",\n factor=0.2,\n patience=2,\n min_lr=1e-6,\n verbose=True,\n ),\n \"monitor\": \"val_loss\",\n }\n return [self.optimizer], [self.scheduler]", "def set_optimizer_params(self):\n n_params = len(self.optim_params)\n if self.optimizer_name == 'GradientDescent' and n_params == 1:\n self.optimizer = tf.keras.optimizers.SGD(\n learning_rate=self.optim_params[0],\n momentum=0)\n elif self.optimizer_name == 'Momentum' and n_params == 2:\n self.optimizer = tf.keras.optimizers.SGD(\n learning_rate=self.optim_params[0],\n momentum=self.optim_params[1])\n elif self.optimizer_name == 'AdaGrad' and n_params == 2:\n self.optimizer = tf.keras.optimizers.Adagrad(\n learning_rate=self.optim_params[0],\n initial_accumulator_value=self.optim_params[1])\n elif self.optimizer_name == 'AdaDelta' and n_params == 2:\n self.optimizer = tf.keras.optimizers.Adam(\n learning_rate=self.optim_params[0],\n rho=self.optim_params[1])\n elif self.optimizer_name == 'RMSProp' and n_params == 3:\n self.optimizer = tf.keras.optimizers.Adam(\n learning_rate=self.optim_params[0],\n rho=self.optim_params[1],\n momentum=self.optim_params[2])\n elif self.optimizer_name == 'Adam' and n_params == 3:\n self.optimizer = tf.keras.optimizers.Adam(\n learning_rate=self.optim_params[0],\n beta_1=self.optim_params[1],\n beta_2=self.optim_params[2])\n elif self.optimizer_name == 'Nadam' and n_params == 3:\n self.optimizer = tf.keras.optimizers.Nadam(\n learning_rate=self.optim_params[0],\n beta_1=self.optim_params[1],\n beta_2=self.optim_params[2])\n else:\n raise Exception(\"[ERROR] Wrong optimizer or parameters for \"\n \"optimizer\")", "def set_optimizer(self, config):\r\n self.optimizer = optim.Adam(self.net.parameters(), config.lr)\r\n self.scheduler = optim.lr_scheduler.ExponentialLR(self.optimizer, config.lr_decay)", "def configure_optimizers(self) -> tuple[list[Optimizer], list[LRScheduler]]:\n # Original paper uses LARS optimizer, but this is not defined in PyTorch\n optimizer = Adam(\n self.parameters(),\n lr=self.hparams[\"lr\"],\n weight_decay=self.hparams[\"weight_decay\"],\n )\n max_epochs = 200\n if self.trainer and self.trainer.max_epochs:\n max_epochs = self.trainer.max_epochs\n if self.hparams[\"version\"] == 1:\n warmup_epochs = 10\n else:\n warmup_epochs = int(max_epochs * 0.05)\n lr_scheduler = SequentialLR(\n optimizer,\n schedulers=[\n LinearLR(optimizer, total_iters=warmup_epochs),\n CosineAnnealingLR(optimizer, T_max=max_epochs),\n ],\n milestones=[warmup_epochs],\n )\n return [optimizer], [lr_scheduler]", "def _set_optimizer(self):\n\n if self.optimizer_name == 'Adam':\n self.optimizer = optim.Adam(self.net.parameters(),\n lr=self.learning_rate,\n betas=self.betas,\n eps=1e-8,\n weight_decay=self.weight_decay)\n elif self.optimizer_name == 'SGD':\n self.optimizer = optim.SGD(self.net.parameters(),\n lr=self.learning_rate,\n momentum=self.momentum,\n weight_decay=self.weight_decay)\n elif self.optimizer_name == 'SGD_Nesterov':\n self.optimizer = optim.SGD(self.net.parameters(),\n lr=self.learning_rate,\n momentum=self.momentum,\n weight_decay=self.weight_decay,\n nesterov=True)\n elif self.optimizer_name == 'RMSprop':\n self.optimizer = optim.Adagrad(self.net.parameters(),\n lr=self.learning_rate,\n momentum=self.momentum,\n weight_decay=self.weight_decay)\n elif self.optimizer_name == 'Adagrad':\n self.optimizer = optim.Adagrad(self.net.parameters(),\n lr=self.learning_rate,\n 
weight_decay=self.weight_decay)\n else:\n print(\"Optimizer '\" + self.optimizer_name + \"' not implemented.\")", "def configure_optimizers(self):\n optimizer = optim.Adam(\n self.parameters(), lr=self.hparams.learning_rate)\n # scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)\n return optimizer", "def set_optimizer(self, optimizer, opti_scheduler=True, loss_scheduler_patience=7):\n self.optimizer = optimizer\n if opti_scheduler:\n self.opti_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer,\n 'min', # overwritten by update_dev_scores\n patience=loss_scheduler_patience,\n verbose=True)", "def __configure_optimizer(self, learning_rate):\n if self.optimizer == 'adadelta':\n optimizer = tf.train.AdadeltaOptimizer(\n learning_rate,\n rho=self.adadelta_rho,\n epsilon=self.opt_epsilon)\n elif self.optimizer == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(\n learning_rate,\n initial_accumulator_value=self.adagrad_initial_accumulator_value)\n elif self.optimizer == 'adam':\n optimizer = tf.train.AdamOptimizer(\n learning_rate,\n beta1=self.adam_beta1,\n beta2=self.adam_beta2,\n epsilon=self.opt_epsilon)\n elif self.optimizer == 'ftrl':\n optimizer = tf.train.FtrlOptimizer(\n learning_rate,\n learning_rate_power=self.ftrl_learning_rate_power,\n initial_accumulator_value=self.ftrl_initial_accumulator_value,\n l1_regularization_strength=self.ftrl_l1,\n l2_regularization_strength=self.ftrl_l2)\n elif self.optimizer == 'momentum':\n optimizer = tf.train.MomentumOptimizer(\n learning_rate,\n momentum=self.momentum,\n name='Momentum')\n elif self.optimizer == 'rmsprop':\n optimizer = tf.train.RMSPropOptimizer(\n learning_rate,\n decay=self.rmsprop_decay,\n momentum=self.rmsprop_momentum,\n epsilon=self.opt_epsilon)\n elif self.optimizer == 'sgd':\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n else:\n raise ValueError('Optimizer [%s] was not recognized', self.optimizer)\n return optimizer", "def configure_optimizers(self):\n optimizer = torch.optim.Adam(\n self.model.parameters(),\n lr=self.lr,\n weight_decay=self.weight_decay,\n )\n\n return {\n \"optimizer\": optimizer,\n \"lr_scheduler\": {\n \"scheduler\": ReduceLROnPlateau(\n optimizer=optimizer,\n mode=\"min\",\n factor=0.5,\n patience=self.patience,\n ),\n \"monitor\": \"train_loss\",\n },\n }", "def configure_optimizers(self):\n optimizer = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)\n scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)\n return [optimizer], [scheduler]", "def configure_schedulers(self, **kwargs):\n if self.scheduler_type == 'CosineAnnealingWarmRestarts':\n self.scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(self.optimizer, **self.scheduler_params)\n elif self.scheduler_type == 'CosineAnnealingLR':\n self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer, **self.scheduler_params)\n elif self.scheduler_type == 'CyclicLR':\n self.scheduler = torch.optim.lr_scheduler.CyclicLR(self.optimizer, **self.scheduler_params)\n else:\n self.scheduler = None", "def configure_optimizers(self) -> dict[str, Any]:\n optimizer = torch.optim.Adam(\n self.model.parameters(), lr=self.hyperparams[\"learning_rate\"]\n )\n return {\n \"optimizer\": optimizer,\n \"lr_scheduler\": {\n \"scheduler\": ReduceLROnPlateau(\n optimizer,\n patience=self.hyperparams[\"learning_rate_schedule_patience\"],\n ),\n \"monitor\": \"val_loss\",\n },\n }", "def configure_optimizer(learning_rate):\n\tif train_config['optimizer'] == 
'adadelta':\n\t\toptimizer = tf.train.AdadeltaOptimizer(learning_rate,\n\t\t rho=train_config['adadelta_rho'],\n\t\t epsilon=train_config['opt_epsilon'])\n\telif train_config['optimizer'] == 'dadgrad':\n\t\toptimizer = tf.train.AdagradDAOptimizer(\n\t\t\tlearning_rate,\n\t\t\tinitial_gradient_squared_accumulator_value=train_config['adagrad_initial_accumulator_value'])\n\telif train_config['optimizer'] == 'adam':\n\t\toptimizer = tf.train.AdamOptimizer(\n\t\t\tlearning_rate,\n\t\t\tbeta1=train_config['adam_beta1'],\n\t\t\tbeta2=train_config['adam_beta2'],\n\t\t\tepsilon=train_config['opt_epsilon'])\n\telif train_config['optimizer'] == 'ftrl':\n\t\toptimizer = tf.train.FtrlOptimizer(\n\t\t\tlearning_rate,\n\t\t\tlearning_rate_power=train_config['ftrl_learning_rate_power'],\n\t\t\tinitial_accumulator_value=train_config['ftrl_initial_accumulator_value'],\n\t\t\tl1_regularization_strength=train_config['ftrl_l1'],\n\t\t\tl2_regularization_strength=train_config['ftrl_l2'])\n\telif train_config['optimizer'] == 'momentum':\n\t\toptimizer = tf.train.MomentumOptimizer(\n\t\t\tlearning_rate,\n\t\t\tmomentum=train_config['momentum'],\n\t\t\tname='Momentum')\n\telif train_config['optimizer'] == 'rmsprop':\n\t\toptimizer = tf.train.RMSPropOptimizer(\n\t\t\tlearning_rate,\n\t\t\tdecay=train_config['rmsprop_decay'],\n\t\t\tmomentum=train_config['rmsprop_momentum'],\n\t\t\tepsilon=train_config['opt_epsilon'])\n\telif train_config['optimizer'] == 'sgd':\n\t\toptimizer = tf.train.GradientDescentOptimizer(learning_rate)\n\telse:\n\t\traise ValueError('Optimizer [%s] was not recognized' % train_config['optimizer'])\n\treturn optimizer", "def configure_optimizers(self):\n model = self.model\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": self.hparams.weight_decay,\n },\n {\n \"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n if self.hparams.lamb:\n optimizer = FusedLAMB(\n optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)\n\n elif self.hparams.adafactor:\n optimizer = Adafactor(\n optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False\n )\n else:\n optimizer = FusedAdam(\n optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)\n self.opt = optimizer\n\n scheduler = self.get_lr_scheduler()\n\n return [optimizer], [scheduler]", "def set_optimizer(self, probe):\n if 'weight_decay' in self.args['probe_training']:\n weight_decay = self.args['probe_training']['weight_decay']\n else:\n weight_decay = 0\n if 'scheduler_patience' in self.args['probe_training']:\n scheduler_patience = self.args['probe_training']['scheduler_patience']\n else:\n scheduler_patience = 0\n \n learning_rate = 0.001 if not 'learning_rate' in self.args['probe_training'] else\\\n self.args['probe_training']['learning_rate']\n \n scheduler_factor = 0.5 if not 'scheduler_factor' in self.args['probe_training'] else\\\n self.args['probe_training']['scheduler_factor']\n\n self.optimizer = optim.Adam(probe.parameters(), lr=learning_rate, weight_decay=weight_decay)\n self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer,\n mode='min',\n factor=scheduler_factor,\n patience=scheduler_patience)", "def set_optimizer(self, probe):\n if 'weight_decay' in self.args['probe_training']:\n weight_decay 
= self.args['probe_training']['weight_decay']\n else:\n weight_decay = 0\n if 'scheduler_patience' in self.args['probe_training']:\n scheduler_patience = self.args['probe_training']['scheduler_patience']\n else:\n scheduler_patience = 0\n \n learning_rate = 0.001 if not 'learning_rate' in self.args['probe_training'] else\\\n self.args['probe_training']['learning_rate']\n \n scheduler_factor = 0.5 if not 'scheduler_factor' in self.args['probe_training'] else\\\n self.args['probe_training']['scheduler_factor']\n\n self.optimizer = optim.Adam(probe.parameters(), lr=learning_rate, weight_decay=weight_decay)\n self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer,\n mode='min',\n factor=scheduler_factor,\n patience=scheduler_patience)", "def configure_optimizers(self) -> Dict[str, Any]:\n optimizer = torch.optim.AdamW(\n self.model.parameters(), lr=self.hparams[\"learning_rate\"]\n )\n return {\n \"optimizer\": optimizer,\n \"lr_scheduler\": {\n \"scheduler\": ReduceLROnPlateau(\n optimizer, patience=self.hparams[\"learning_rate_schedule_patience\"]\n ),\n \"monitor\": \"val_loss\",\n },\n }", "def configure_optimizers(\n self,\n ) -> Dict[str, Union[torch.optim.Optimizer, Dict[str, Union[str, _LRScheduler]]]]:\n optimizer = instantiate(self.optimizer_cfg, params=self.encoder.parameters())\n scheduler = {\n \"scheduler\": instantiate(self.scheduler_cfg, optimizer),\n \"monitor\": \"train_err1\",\n }\n\n return dict(optimizer=optimizer, lr_scheduler=scheduler)", "def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)\n return optimizer", "def configure_optimizers(self):\n\n optimizer = self.optimizer(self.parameters(), lr=self.lr)\n\n return optimizer", "def configure_optimizers(self):\n optimizer = torch.optim.Adam(\n self.parameters(), lr=self.hparams[\"learning_rate\"]\n )\n return optimizer", "def setOptimizerParams(self,lr,momentum,decay):\n self.optimizer = SGD(lr=lr,momentum=momentum,decay=decay)", "def _init_optimizer(self, optimizer):\n if optimizer == \"rmsprop\":\n self.optimizer = keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06)\n elif optimizer == \"adagrad\":\n self.optimizer = keras.optimizers.Adagrad(lr=0.01, epsilon=1e-06)\n elif optimizer == \"adadelta\":\n self.optimizer = keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06)\n elif optimizer == \"adam\":\n self.optimizer = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n elif optimizer == \"adamax\":\n self.optimizer = keras.optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08) \n elif hasattr(optimizer, __call__):\n self.optimizer = optimizer\n else:\n print \"Error: unsupported optimizer %s\"%optimizer\n sys.exit(0)", "def _init_optimizer(self, optimizer):\n if optimizer == \"rmsprop\":\n self.optimizer = keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06)\n elif optimizer == \"adagrad\":\n self.optimizer = keras.optimizers.Adagrad(lr=0.01, epsilon=1e-06)\n elif optimizer == \"adadelta\":\n self.optimizer = keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06)\n elif optimizer == \"adam\":\n self.optimizer = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n elif optimizer == \"adamax\":\n self.optimizer = keras.optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08) \n elif hasattr(optimizer, __call__):\n self.optimizer = optimizer\n else:\n print \"Error: unsupported optimizer %s\"%optimizer\n sys.exit(0)", "def _inst_optimizer(self):\n optimizer = 
Optimizers(self.m_cfg['configs']['lr_politics']['optimizer']).value\n lr_schedule = self.m_cfg['configs']['lr_politics']['lr']\n opt = optimizer(learning_rate=lr_schedule)\n return opt", "def configure_optimizers(self):\n optimizer = torch.optim.Adam(\n self.parameters(),\n lr=self.hparams.learning_rate,\n weight_decay=self.hparams.weight_decay,\n )\n return optimizer", "def adjust_learning_rate(args,optimizer, epoch):\n lr = args.lr\n schedule = args.lr_schedule\n # schedule from TRADES repo (different from paper due to bug there)\n if schedule == 'trades':\n if epoch >= 0.75 * args.epochs:\n lr = args.lr * 0.1\n # schedule as in TRADES paper\n elif schedule == 'trades_fixed':\n if epoch >= 0.75 * args.epochs:\n lr = args.lr * 0.1\n if epoch >= 0.9 * args.epochs:\n lr = args.lr * 0.01\n if epoch >= args.epochs:\n lr = args.lr * 0.001\n # cosine schedule\n elif schedule == 'cosine':\n lr = args.lr * 0.5 * (1 + np.cos((epoch - 1) / args.epochs * np.pi))\n # schedule as in WRN paper\n elif schedule == 'wrn':\n if epoch >= 0.3 * args.epochs:\n lr = args.lr * 0.2\n if epoch >= 0.6 * args.epochs:\n lr = args.lr * 0.2 * 0.2\n if epoch >= 0.8 * args.epochs:\n lr = args.lr * 0.2 * 0.2 * 0.2\n else:\n raise ValueError('Unkown LR schedule %s' % schedule)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr", "def setup_optimiser(self):\n self.optimiser = ScheduledOptim(\n optim.Adam(\n filter(lambda x: x.requires_grad, self.model.parameters()),\n betas=(0.9, 0.98), eps=1e-09, lr=self.opt.learning_rate),\n self.opt.d_model, self.opt.n_warmup_steps)\n if self.opt.verbose:\n print(\"[Info] optimiser configured.\")", "def configure_optimizer(learning_rate):\n if FLAGS.optimizer == 'adadelta':\n optimizer = tf.train.AdadeltaOptimizer(\n learning_rate,\n rho=FLAGS.adadelta_rho,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(\n learning_rate,\n initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)\n elif FLAGS.optimizer == 'adam':\n optimizer = tf.train.AdamOptimizer(\n learning_rate,\n beta1=FLAGS.adam_beta1,\n beta2=FLAGS.adam_beta2,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'ftrl':\n optimizer = tf.train.FtrlOptimizer(\n learning_rate,\n learning_rate_power=FLAGS.ftrl_learning_rate_power,\n initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,\n l1_regularization_strength=FLAGS.ftrl_l1,\n l2_regularization_strength=FLAGS.ftrl_l2)\n elif FLAGS.optimizer == 'momentum':\n optimizer = tf.train.MomentumOptimizer(\n learning_rate,\n momentum=FLAGS.momentum,\n name='Momentum')\n elif FLAGS.optimizer == 'rmsprop':\n optimizer = tf.train.RMSPropOptimizer(\n learning_rate,\n decay=FLAGS.rmsprop_decay,\n momentum=FLAGS.rmsprop_momentum,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'sgd':\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n elif FLAGS.optimizer == \"adamweightdecay\":\n optimizer = AdamWeightDecayOptimizer(\n learning_rate=learning_rate,\n weight_decay_rate=0.01,\n beta_1=FLAGS.adam_beta1,\n beta_2=FLAGS.adam_beta2,\n epsilon=FLAGS.opt_epsilon,\n exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"bias\"])\n else:\n raise ValueError('Optimizer [%s] was not recognized' % FLAGS.optimizer)\n return optimizer", "def setup_optimizers(self, *args, **kwargs):\n\n # self.optimizers.append(...)\n # self.loss.append(...)\n pass", "def _configure_optimizer(learning_rate):\n if FLAGS.optimizer == 'adadelta':\n optimizer = tf.train.AdadeltaOptimizer(\n 
learning_rate,\n rho=FLAGS.adadelta_rho,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(\n learning_rate,\n initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)\n elif FLAGS.optimizer == 'adam':\n optimizer = tf.train.AdamOptimizer(\n learning_rate,\n beta1=FLAGS.adam_beta1,\n beta2=FLAGS.adam_beta2,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'ftrl':\n optimizer = tf.train.FtrlOptimizer(\n learning_rate,\n learning_rate_power=FLAGS.ftrl_learning_rate_power,\n initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,\n l1_regularization_strength=FLAGS.ftrl_l1,\n l2_regularization_strength=FLAGS.ftrl_l2)\n elif FLAGS.optimizer == 'momentum':\n optimizer = tf.train.MomentumOptimizer(\n learning_rate,\n momentum=FLAGS.momentum,\n name='Momentum')\n elif FLAGS.optimizer == 'rmsprop':\n optimizer = tf.train.RMSPropOptimizer(\n learning_rate,\n decay=FLAGS.rmsprop_decay,\n momentum=FLAGS.rmsprop_momentum,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'sgd':\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n else:\n raise ValueError('Optimizer [%s] was not recognized', FLAGS.optimizer)\n return optimizer", "def configure_optimizers(self):\n opt = torch.optim.AdamW(\n self.parameters(), lr=self.lr, weight_decay=self.weight_decay\n )\n return {\n \"optimizer\": opt,\n \"lr_scheduler\": {\n \"scheduler\": torch.optim.lr_scheduler.ReduceLROnPlateau(opt),\n \"monitor\": \"train_loss\",\n \"frequency\": 1,\n },\n }", "def configure_optimizers(self):\n optimizer = _get_optimizer(model_parameters=self.parameters(\n ), project_parameters=self.project_parameters)\n if self.project_parameters.step_size > 0:\n lr_scheduler = _get_lr_scheduler(\n project_parameters=self.project_parameters, optimizer=optimizer)\n return [optimizer], [lr_scheduler]\n else:\n return optimizer", "def optimizer_config(self):\r\n return {\r\n \"lr\": self.args.lr[0],\r\n \"momentum\": self.args.momentum,\r\n \"weight_decay\": self.args.weight_decay,\r\n }", "def setup(self, opt):\n if self.isTrain:\n self.schedulers = [base_function.get_scheduler(optimizer, opt) for optimizer in self.optimizers]\n if not self.isTrain or opt.continue_train:\n self.load_networks(opt.which_iter)", "def get_scheduler(optimizer, opt):\n \n epochs_no_decay = opt.epochs - opt.lr_linear\n lr_policy = opt.lr_policy\n \n if lr_policy == 'linear':\n def lr_lambda(epoch):\n return 1. - max(0, epoch - epochs_no_decay) / float(opt.lr_linear + 1)\n scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)\n elif lr_policy == 'step':\n # multiply by gamma every lr_decay_steps\n # for example lr_decay_steps=50 and initial learning = .5\n # then we have \n # lr = .5 for 0 <= epoch < 50;\n # lr = .05 for 50 <= epoch < 100;\n # lr = .005 for 100 <= epoch < 150;\n scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_step, gamma=.1)\n elif lr_policy == 'plateau':\n # Reduce learning rate when a metric has stopped improving. \n # Models often benefit from reducing the learning rate by a factor of 2-10 once learning stagnates. 
\n # This scheduler reads a metrics quantity and if no improvement \n # is seen for a ‘patience’ number of epochs, \n # the learning rate is reduced.\n # Parameters\n # - mode (str, default=min): In `min` mode, lr will be reduced when the quantity monitored has stopped decreasing; \n # in `max` mode, lr will be reduced when the quantity monitored has stopped increasing.\n # - factor (float, default=.1): Factor by which the learning rate will be reduced. new_lr = lr * factor.\n # - patience (int, default=10): Number of epochs with no improvement after which learning rate will be reduced. \n # - threshold (float): only decrease lr if the change in the quantitiy monitored is smaller than threshold. \n # Say we have threshold=0.001, if loss is $18.0$ on epoch $n$ and loss is $17.9999$ on epoch $n+1$,\n # then multiply current learning rate by the factor.\n # On the contrary, if the loss is 17.99, lr doesn't have to be changed.\n scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=.2, threshold=.01, patience=5)\n else:\n return NotImplementedError(f'learning rate policy {lr_policy} is not implemented')\n return scheduler", "def _configure_optimizer(learning_rate):\n if FLAGS.optimizer == 'adadelta':\n optimizer = tf.train.AdadeltaOptimizer(\n learning_rate,\n rho=FLAGS.adadelta_rho,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(\n learning_rate,\n initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)\n elif FLAGS.optimizer == 'adam':\n optimizer = tf.train.AdamOptimizer(\n learning_rate,\n beta1=FLAGS.adam_beta1,\n beta2=FLAGS.adam_beta2,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'ftrl':\n optimizer = tf.train.FtrlOptimizer(\n learning_rate,\n learning_rate_power=FLAGS.ftrl_learning_rate_power,\n initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,\n l1_regularization_strength=FLAGS.ftrl_l1,\n l2_regularization_strength=FLAGS.ftrl_l2)\n elif FLAGS.optimizer == 'momentum':\n optimizer = tf.train.MomentumOptimizer(\n learning_rate,\n momentum=FLAGS.momentum,\n use_nesterov=True,\n name='Momentum')\n elif FLAGS.optimizer == 'rmsprop':\n optimizer = tf.train.RMSPropOptimizer(\n learning_rate,\n decay=FLAGS.rmsprop_decay,\n momentum=FLAGS.momentum,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'sgd':\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n else:\n raise ValueError('Optimizer [%s] was not recognized', FLAGS.optimizer)\n return optimizer", "def _configure_optimizer(learning_rate):\n if FLAGS.optimizer == 'adadelta':\n optimizer = tf.train.AdadeltaOptimizer(\n learning_rate,\n rho=FLAGS.adadelta_rho,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(\n learning_rate,\n initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)\n elif FLAGS.optimizer == 'adam':\n optimizer = tf.train.AdamOptimizer(\n learning_rate,\n beta1=FLAGS.adam_beta1,\n beta2=FLAGS.adam_beta2,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'ftrl':\n optimizer = tf.train.FtrlOptimizer(\n learning_rate,\n learning_rate_power=FLAGS.ftrl_learning_rate_power,\n initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,\n l1_regularization_strength=FLAGS.ftrl_l1,\n l2_regularization_strength=FLAGS.ftrl_l2)\n elif FLAGS.optimizer == 'momentum':\n optimizer = tf.train.MomentumOptimizer(\n learning_rate,\n momentum=FLAGS.momentum,\n name='Momentum')\n elif FLAGS.optimizer == 'rmsprop':\n optimizer = 
tf.train.RMSPropOptimizer(\n learning_rate,\n decay=FLAGS.rmsprop_decay,\n momentum=FLAGS.rmsprop_momentum,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'sgd':\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n else:\n raise ValueError('Optimizer [%s] was not recognized', FLAGS.optimizer)\n return optimizer", "def configure_optimizers(self) -> Tuple[List[optim.Optimizer], List[optim.lr_scheduler._LRScheduler]]:\n if (\n (\"weight_decay\" in self.optimizer_params)\n and (self.optimizer_params[\"weight_decay\"] != 0)\n and self.exclude_bn_bias\n ):\n defaults = copy(self.optimizer_params)\n weight_decay = defaults.pop(\"weight_decay\")\n\n wd_group = []\n nowd_group = []\n for name, tensor in self.named_parameters():\n if not tensor.requires_grad:\n continue\n if (\"bias\" in name) or (\"bn\" in name):\n nowd_group.append(tensor)\n else:\n wd_group.append(tensor)\n\n params = [\n {\"params\": wd_group, \"weight_decay\": weight_decay},\n {\"params\": nowd_group, \"weight_decay\": 0.0},\n ]\n optimizer = self.optimizer_class(params, **defaults)\n else:\n optimizer = self.optimizer_class(self.parameters(), **self.optimizer_params)\n lr_scheduler = self.lr_scheduler_class(optimizer, **self.lr_scheduler_params)\n return [optimizer], [lr_scheduler]", "def configure_optimizer(learning_rate):\r\n if FLAGS.optimizer == 'adadelta':\r\n optimizer = tf.train.AdadeltaOptimizer(learning_rate, \r\n rho=FLAGS.adadelta_rho,epsilon=FLAGS.opt_epsilon)\r\n elif FLAGS.optimizer == 'adagrad':\r\n optimizer = tf.train.AdagradOptimizer(learning_rate,\r\n initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)\r\n elif FLAGS.optimizer == 'adam':\r\n optimizer = tf.train.AdamOptimizer(learning_rate,\r\n beta1=FLAGS.adam_beta1,beta2=FLAGS.adam_beta2,epsilon=FLAGS.opt_epsilon)\r\n elif FLAGS.optimizer == 'ftrl':\r\n optimizer = tf.train.FtrlOptimizer(learning_rate,learning_rate_power=FLAGS.ftrl_learning_rate_power,\r\n initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,\r\n l1_regularization_strength=FLAGS.ftrl_l1,l2_regularization_strength=FLAGS.ftrl_l2)\r\n elif FLAGS.optimizer == 'momentum':\r\n optimizer = tf.train.MomentumOptimizer(learning_rate,\r\n momentum=FLAGS.momentum,name='Momentum')\r\n elif FLAGS.optimizer == 'rmsprop':\r\n optimizer = tf.train.RMSPropOptimizer(learning_rate,decay=FLAGS.rmsprop_decay,\r\n momentum=FLAGS.rmsprop_momentum,epsilon=FLAGS.opt_epsilon)\r\n elif FLAGS.optimizer == 'sgd':\r\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)\r\n else:\r\n raise ValueError('Optimizer [%s] was not recognized', FLAGS.optimizer)\r\n return optimizer", "def setup_optims(self):\n lr = self.train_config['lr']\n b1 = self.train_config['b1']\n b2 = self.train_config['b2']\n weight_decay = self.train_config['weight_decay']\n self.opt = torch.optim.Adam(self.network.parameters(), lr=lr, betas=(b1, b2),\n weight_decay=weight_decay)", "def exp_lr_scheduler(optimizer, epoch, init_lr=0.01, lr_decay_epoch=10):\n lr = init_lr * (0.8**(epoch // lr_decay_epoch))\n print('LR is set to {}'.format(lr))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n return optimizer", "def initialize_optimizer(model, args):\n parameters = [p for p in model.parameters() if p.requires_grad]\n if args.optimizer == 'sgd':\n optimizer = optim.SGD(parameters, args.learning_rate,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n elif args.optimizer == 'adam':\n optimizer = optim.Adam(parameters, args.learning_rate,\n 
weight_decay=args.weight_decay)\n elif args.optimizer == 'adamax':\n optimizer = optim.Adamax(parameters, args.learning_rate,\n weight_decay=args.weight_decay)\n elif args.optimizer == 'adagrad':\n optimizer = optim.Adagrad(parameters, args.learning_rate,\n weight_decay=args.weight_decay)\n scheduler = ReduceLROnPlateau(optimizer, 'min', verbose=True)\n return optimizer, scheduler", "def exp_lr_scheduler(optimizer, epoch, init_lr=0.01, lr_decay_epoch=10):\r\n lr = init_lr * (0.8**(epoch // lr_decay_epoch))\r\n print('LR is set to {}'.format(lr))\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr\r\n\r\n return optimizer", "def adjust_learning_rate(opt, optimizer, epoch):\n epoch = copy.deepcopy(epoch)\n lr = opt.maxlr\n wd = opt.weightDecay\n if opt.learningratescheduler == 'imagenetscheduler':\n if epoch >= 1 and epoch <= 18:\n lr = 1e-3\n wd = 5e-5\n elif epoch >= 19 and epoch <= 29:\n lr = 5e-4\n wd = 5e-5\n elif epoch >= 30 and epoch <= 43:\n lr = 1e-4\n wd = 0\n elif epoch >= 44 and epoch <= 52:\n lr = 5e-5\n wd = 0\n elif epoch >= 53:\n lr = 2e-5\n wd = 0\n if opt.optimType=='sgd':\n lr *= 10\n opt.lr = lr\n opt.weightDecay = wd\n if opt.learningratescheduler == 'decayscheduler':\n while epoch >= opt.decayinterval:\n lr = lr/opt.decaylevel\n epoch = epoch - opt.decayinterval\n lr = max(lr,opt.minlr)\n opt.lr = lr\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n param_group['weight_decay'] = wd", "def configure_optimizers(self):\n model = self.model\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": self.hparams.weight_decay,\n },\n {\n \"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n if self.hparams.lamb:\n optimizer_reduced_precision_type = self.config.dtype if self.hparams.allreduce_post_accumulation_half_precision else None\n optimizer = FusedMixedPrecisionLamb(\n optimizer_grouped_parameters,\n lr=self.hparams.learning_rate,\n eps=self.hparams.adam_epsilon,\n max_grad_norm=self.hparams.gradient_clip_val,\n reduced_precision_dtype=optimizer_reduced_precision_type)\n elif self.hparams.allreduce_post_accumulation_half_precision:\n raise ValueError(\"--allreduce_post_accumulation_half_precision is only supported on LAMB optimizer\")\n elif self.hparams.adafactor:\n optimizer = Adafactor(\n optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False\n )\n else:\n optimizer = FusedAdam(\n optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)\n self.opt = optimizer\n\n scheduler = self.get_lr_scheduler()\n\n return [optimizer], [scheduler]", "def _update_initial_learning_rate(configs, learning_rate):\n\n optimizer_type = get_optimizer_type(configs[\"train_config\"])\n if optimizer_type == \"rms_prop_optimizer\":\n optimizer_config = configs[\"train_config\"].optimizer.rms_prop_optimizer\n elif optimizer_type == \"momentum_optimizer\":\n optimizer_config = configs[\"train_config\"].optimizer.momentum_optimizer\n elif optimizer_type == \"adam_optimizer\":\n optimizer_config = configs[\"train_config\"].optimizer.adam_optimizer\n else:\n raise TypeError(\"Optimizer %s is not supported.\" % optimizer_type)\n\n learning_rate_type = get_learning_rate_type(optimizer_config)\n if learning_rate_type == \"constant_learning_rate\":\n constant_lr = 
optimizer_config.learning_rate.constant_learning_rate\n constant_lr.learning_rate = learning_rate\n elif learning_rate_type == \"exponential_decay_learning_rate\":\n exponential_lr = (\n optimizer_config.learning_rate.exponential_decay_learning_rate)\n exponential_lr.initial_learning_rate = learning_rate\n elif learning_rate_type == \"manual_step_learning_rate\":\n manual_lr = optimizer_config.learning_rate.manual_step_learning_rate\n original_learning_rate = manual_lr.initial_learning_rate\n learning_rate_scaling = float(learning_rate) / original_learning_rate\n manual_lr.initial_learning_rate = learning_rate\n for schedule in manual_lr.schedule:\n schedule.learning_rate *= learning_rate_scaling\n elif learning_rate_type == \"cosine_decay_learning_rate\":\n cosine_lr = optimizer_config.learning_rate.cosine_decay_learning_rate\n learning_rate_base = cosine_lr.learning_rate_base\n warmup_learning_rate = cosine_lr.warmup_learning_rate\n warmup_scale_factor = warmup_learning_rate / learning_rate_base\n cosine_lr.learning_rate_base = learning_rate\n cosine_lr.warmup_learning_rate = warmup_scale_factor * learning_rate\n else:\n raise TypeError(\"Learning rate %s is not supported.\" % learning_rate_type)", "def _initialize_trainer(self):\n self.cost = mse(0., 0.)\n for task_id in self.task_ids.keys():\n self.cost += self.model.get_layer(task_id + '-loss')\n\n opt = Optimizer(self.cost)\n self.optimizer = opt.get_adagrad(self.learning_rate)", "def _set_learning_rates(\n self, lr_a=None, lr_alpha=None, lr_l=None, lr_labda=None, lr_c=None\n ):\n if lr_a:\n self._a_train.lr.assign(lr_a)\n if lr_alpha:\n self._alpha_train.lr.assign(lr_alpha)\n if self.use_lyapunov:\n if lr_l:\n self._l_train.lr.assign(lr_l)\n if lr_labda:\n self._lambda_train.lr.assign(lr_labda)\n else:\n if lr_c:\n self._q_train.lr.assign(lr_c)", "def exp_lr_scheduler(optimizer, epoch, init_lr=0.01, lr_decay_epoch=decay):\n lr = init_lr * (0.1**(epoch // lr_decay_epoch))\n\n if epoch % lr_decay_epoch == 0:\n print('LR is set to {}'.format(lr))\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n return optimizer", "def reschedule_learning_rate(model, epoch, scheduler):\n if epoch == 7:\n optimizer = torch.optim.SGD(model.parameters(), lr=0.005)\n current_lr = next(iter(optimizer.param_groups))[\"lr\"]\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n optimizer, 6, eta_min=current_lr / 100, last_epoch=-1\n )\n if epoch == 13:\n optimizer = torch.optim.SGD(model.parameters(), lr=0.005)\n current_lr = next(iter(optimizer.param_groups))[\"lr\"]\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n optimizer, 6, eta_min=current_lr / 100, last_epoch=-1\n )\n if epoch == 19:\n optimizer = torch.optim.SGD(model.parameters(), lr=0.002)\n current_lr = next(iter(optimizer.param_groups))[\"lr\"]\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n optimizer, 6, eta_min=current_lr / 100, last_epoch=-1\n )\n if epoch == 25:\n optimizer = torch.optim.SGD(model.parameters(), lr=0.002)\n current_lr = next(iter(optimizer.param_groups))[\"lr\"]\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n optimizer, 6, eta_min=current_lr / 100, last_epoch=-1\n )\n\n return model, scheduler", "def update_learning_rate(self) -> None:\n optimizer = list(self.optimizers.values())[0]\n old_lr = optimizer.param_groups[0]['lr']\n for name, scheduler in self.schedulers.items():\n if name == 'generator' and self.opt.generator_scheduler_name == 'plateau':\n scheduler.step(self.metric)\n elif name == 'discriminator' 
and self.opt.discriminator_scheduler_name == 'plateau':\n scheduler.step(self.metric)\n else:\n scheduler.step()\n\n lr = optimizer.param_groups[0]['lr']\n print('learning rate %.7f -> %.7f' % (old_lr, lr))\n return", "def _create_train_op(self):\n self.lr = self.learning_rate\n # global_step = tf.train.get_or_create_global_step()\n learning_rate = tf.constant(value=self.learning_rate, shape=[], dtype=tf.float32)\n learning_rate =tf.train.exponential_decay(learning_rate,self.global_step,2*self.num_warm_up,0.96,staircase=True,name=\"exponential_decay\")\n\n # Implements linear warmup. I.e., if global_step < num_warmup_steps, the\n # learning rate will be `global_step/num_warmup_steps * init_lr`.\n if self.num_warm_up:\n global_steps_int = tf.cast(self.global_step, tf.int32)\n warmup_steps_int = tf.constant(self.num_warm_up, dtype=tf.int32)\n\n global_steps_float = tf.cast(global_steps_int, tf.float32)\n warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)\n\n warmup_percent_done = global_steps_float / warmup_steps_float\n warmup_learning_rate = self.learning_rate * warmup_percent_done\n\n is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)\n learning_rate = (\n (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)\n self.current_learning_rate = learning_rate\n if self.optim_type == 'adagrad':\n self.optimizer = tf.train.AdagradOptimizer(self.lr)\n elif self.optim_type == 'adam':\n self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)\n elif self.optim_type == 'rprop':\n self.optimizer = tf.train.RMSPropOptimizer(self.lr)\n elif self.optim_type == 'sgd':\n self.optimizer = tf.train.GradientDescentOptimizer(self.lr)\n elif self.optim_type == \"bert\":\n self.optimizer = AdamWeightDecayOptimizer(learning_rate=learning_rate, weight_decay_rate=0.01, beta_1=0.9,\n beta_2=0.999, epsilon=1e-6,\n exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"bias\"])\n else:\n raise NotImplementedError('Unsupported optimizer: {}'.format(self.optim_type))\n\n self.logger.info(\"applying optimize %s\" % self.optim_type)\n if self.clip_weight:\n # clip_weight\n tvars = tf.trainable_variables()\n grads = tf.gradients(self.loss, tvars)\n grads, _ = tf.clip_by_global_norm(grads, clip_norm=self.max_norm_grad)\n grad_var_pairs = zip(grads, tvars)\n train_op = self.optimizer.apply_gradients(grad_var_pairs, name='apply_grad', global_step=self.global_step)\n new_global_step = self.global_step + 1\n train_op = tf.group(train_op, [self.global_step.assign(new_global_step)])\n self.train_op = train_op\n else:\n self.train_op = self.optimizer.minimize(self.loss, global_step=self.global_step)", "def configure_optimizer(learning_rate):\n if hp.optimizer == 'adadelta':\n optimizer = tf.train.AdadeltaOptimizer(\n learning_rate,\n rho=hp.adadelta_rho,\n epsilon=hp.opt_epsilon)\n elif hp.optimizer == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(\n learning_rate,\n initial_accumulator_value=hp.adagrad_initial_accumulator_value)\n elif hp.optimizer == 'adam':\n optimizer = tf.train.AdamOptimizer(\n learning_rate,\n beta1=hp.adam_beta1,\n beta2=hp.adam_beta2,\n epsilon=hp.opt_epsilon)\n elif hp.optimizer == 'ftrl':\n optimizer = tf.train.FtrlOptimizer(\n learning_rate,\n learning_rate_power=hp.ftrl_learning_rate_power,\n initial_accumulator_value=hp.ftrl_initial_accumulator_value,\n l1_regularization_strength=hp.ftrl_l1,\n l2_regularization_strength=hp.ftrl_l2)\n elif hp.optimizer == 'momentum':\n optimizer = tf.train.MomentumOptimizer(\n learning_rate,\n 
momentum=hp.momentum,\n name='Momentum')\n elif hp.optimizer == 'rmsprop':\n optimizer = tf.train.RMSPropOptimizer(\n learning_rate,\n decay=hp.rmsprop_decay,\n momentum=hp.rmsprop_momentum,\n epsilon=hp.opt_epsilon)\n elif hp.optimizer == 'sgd':\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n else:\n raise ValueError('Optimizer [%s] was not recognized', hp.optimizer)\n return optimizer", "def adjust_learning_rate(init_lr, optimizer, epoch, n=100):\n init_lr = init_lr * (0.1 ** (epoch // n))\n print('learning rate : ', init_lr)\n for param_group in optimizer.param_groups:\n param_group['lr'] = init_lr", "def init_scheduler(self):\n gamma = self.config_dict.get(\"gamma\")\n if gamma is None:\n return None\n else:\n return torch.optim.lr_scheduler.ExponentialLR(self.optimizer, gamma=gamma)", "def initialize(self): \n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=config.LR)", "def setup(self, opt):\n if self.isTrain:\n self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]\n if not self.isTrain or opt.continue_train:\n load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch\n self.load_networks(load_suffix)\n self.print_networks(opt.verbose)", "def adjust_learning_rate(optimizer, epoch):\n lr = config.optimizer.lr\n schedule = config.lr_schedule if hasattr(config, 'lr_schedule') else 'fixed'\n if schedule == 'fixed':\n if epoch >= 0.75 * config.epochs:\n lr = config.optimizer.lr * 0.1\n if epoch >= 0.9 * config.epochs:\n lr = config.optimizer.lr * 0.01\n if epoch >= config.epochs:\n lr = config.optimizer.lr * 0.001\n # cosine schedule\n elif schedule == 'cosine':\n lr = config.optimizer.lr * 0.5 * (1 + np.cos((epoch - 1) / config.epochs * np.pi))\n elif schedule == 'search':\n if epoch >= 75:\n lr = 0.01\n if epoch >= 90:\n lr = 0.001\n else:\n raise ValueError('Unkown LR schedule %s' % schedule)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr", "def _set_train_params(self,\n lr: float = 1e-3,\n l2norm: float = 1e-2,\n ):\n self.lr = lr\n self.l2norm = l2norm\n self.optimizer = torch.optim.Adam(\n self.model.parameters(), lr=lr, weight_decay=l2norm)", "def set_learning_rate(optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def set_learning_rate(optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def build(optimizer_config, optimizer, total_step):\n optimizer_type = optimizer_config.WhichOneof('optimizer')\n\n if optimizer_type == 'rms_prop_optimizer':\n config = optimizer_config.rms_prop_optimizer\n lr_scheduler = _create_learning_rate_scheduler(\n config.learning_rate, optimizer, total_step=total_step)\n\n if optimizer_type == 'momentum_optimizer':\n config = optimizer_config.momentum_optimizer\n lr_scheduler = _create_learning_rate_scheduler(\n config.learning_rate, optimizer, total_step=total_step)\n\n if optimizer_type == 'adam_optimizer':\n config = optimizer_config.adam_optimizer\n lr_scheduler = _create_learning_rate_scheduler(\n config.learning_rate, optimizer, total_step=total_step)\n\n return lr_scheduler", "def adjust_learning_rate(args, optimizer, epoch):\n if (epoch*3==args.epochs) or (epoch*3==2*args.epochs):\n lr = args.lr * (0.1 ** (epoch*3//args.epochs))\n print(\"Changing Learning Rate to {}\".format(lr))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def lr_scheduler(optimizer, epoch, init_lr=0.1, lr_decay_epoch=100):\r\n\r\n if epoch % lr_decay_epoch 
== 0 and epoch > 1:\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = param_group['lr'] * 0.1\r\n\r\n return optimizer", "def configure_optimizers(self):\n return torch.optim.Adam(self.parameters(), lr=0.02)", "def lr_scheduler(optimizer, epoch, init_lr=0.1, lr_decay_epoch=100):\n\n if epoch % lr_decay_epoch == 0 and epoch > 1:\n for param_group in optimizer.param_groups:\n param_group['lr'] = param_group['lr'] * 0.1\n\n return optimizer", "def lr_scheduler(optimizer, epoch, init_lr=0.1, lr_decay_epoch=50):\n if epoch % lr_decay_epoch == 0 and epoch > 1:\n for param_group in optimizer.param_groups:\n param_group['lr'] = param_group['lr'] * 0.1\n return optimizer", "def lr_scheduler(optimizer, epoch, init_lr=0.1, lr_decay_epoch=50):\n if epoch % lr_decay_epoch == 0 and epoch > 1:\n for param_group in optimizer.param_groups:\n param_group['lr'] = param_group['lr'] * 0.1\n return optimizer", "def create_optimizer(init_lr, num_train_steps, num_warmup_steps):\n # Implements linear decay of the learning rate.\n learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(\n initial_learning_rate=init_lr,\n decay_steps=num_train_steps,\n end_learning_rate=0.0)\n if num_warmup_steps:\n learning_rate_fn = WarmUp(initial_learning_rate=init_lr,\n decay_schedule_fn=learning_rate_fn,\n warmup_steps=num_warmup_steps)\n optimizer = AdamWeightDecay(\n learning_rate=learning_rate_fn,\n weight_decay_rate=0.01,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=[LAYER_NORM_NAME, 'bias'])\n return optimizer", "def adjust_learning_rate(optimizer, cur_epoch, base_lr=0.1, lr_schedule=[4, 8, 12, 14, 16]):\n lr = 0\n for i, e in enumerate(lr_schedule):\n if cur_epoch < e:\n lr = base_lr * (0.1 ** i)\n break\n if lr == 0:\n lr = base_lr\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):\n global_step = tf.compat.v1.train.get_or_create_global_step()\n\n learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)\n\n # Implements linear decay of the learning rate.\n learning_rate = tf.compat.v1.train.polynomial_decay(\n learning_rate,\n global_step,\n num_train_steps,\n end_learning_rate=0.0,\n power=1.0,\n cycle=False)\n\n # Implements linear warmup. 
I.e., if global_step < num_warmup_steps, the\n # learning rate will be `global_step/num_warmup_steps * init_lr`.\n if num_warmup_steps:\n global_steps_int = tf.cast(global_step, tf.int32)\n warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)\n\n global_steps_float = tf.cast(global_steps_int, tf.float32)\n warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)\n\n warmup_percent_done = global_steps_float / warmup_steps_float\n warmup_learning_rate = init_lr * warmup_percent_done\n\n is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)\n learning_rate = (\n (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)\n\n # It is recommended that you use this optimizer for fine tuning, since this\n # is how the model was trained (note that the Adam m/v variables are NOT\n # loaded from init_checkpoint.)\n optimizer = AdamWeightDecayOptimizer(\n learning_rate=learning_rate,\n weight_decay_rate=0.01,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"bias\"])\n\n if use_tpu:\n optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)\n\n tvars = tf.compat.v1.trainable_variables()\n grads = tf.gradients(loss, tvars)\n\n # This is how the model was pre-trained.\n (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)\n\n train_op = optimizer.apply_gradients(\n zip(grads, tvars), global_step=global_step)\n\n # Normally the global step update is done inside of `apply_gradients`.\n # However, `AdamWeightDecayOptimizer` doesn't do this. But if you use\n # a different optimizer, you should probably take this line out.\n new_global_step = global_step + 1\n train_op = tf.group(train_op, [global_step.assign(new_global_step)])\n return train_op", "def create_lr_scheduler(\n self, optimizer: torch.optim.Optimizer # type: ignore\n ) -> Optional[LRScheduler]:\n pass", "def _create_optimizer(self):\n\n with tf.name_scope(\"optimizer\"):\n self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)", "def adjust_learning_rate(args,optimizer, epoch):\n \n args.epochs\n\n lr = args.lr * (\n (0.2 ** int(epoch >= args.epochs - 140))\n * (0.2 ** int(epoch >= args.epochs - 80))\n * (0.2 ** int(epoch >= args.epochs - 40))\n )\n\n ##lr = args.lr ##DELETE ME!\n\n if args.tensorboard:\n log_value(\"learning_rate\", lr, epoch)\n\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = lr", "def adjust_learning_rate(cfg, optimizer):\n for idx, group in enumerate(optimizer.param_groups):\n init_lr = cfg.TRAINING.LR\n if 'step' not in group:\n group['step'] = 0.\n else:\n group['step'] += 1.\n\n group['lr'] = init_lr * (\n 1.0 - float(group['step']) * float(cfg.TRAINING.BATCH_SIZE) /\n (cfg.TRAINING.N_TRIPLETS * float(cfg.TRAINING.EPOCHS)))\n return", "def adjust_learning_rate(optimizer, epoch, model_type):\n if model_type == 1:\n if epoch < 80:\n lr = args.lr\n elif epoch < 120:\n lr = args.lr * 0.1\n else:\n lr = args.lr * 0.01\n elif model_type == 2:\n if epoch < 60:\n lr = args.lr\n elif epoch < 120:\n lr = args.lr * 0.2\n elif epoch < 160:\n lr = args.lr * 0.04\n else:\n lr = args.lr * 0.008\n elif model_type == 3:\n if epoch < 150:\n lr = args.lr\n elif epoch < 225:\n lr = args.lr * 0.1\n else:\n lr = args.lr * 0.01\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def exp_lr_scheduler(optimizer, epoch, init_lr, lr_decay_epoch):\n\n lr = init_lr * (0.1**((epoch-1) // lr_decay_epoch))\n\n if (epoch-1) % lr_decay_epoch == 0:\n print('LR is set to {}'.format(lr))\n 
# log_value('lr',lr,epoch)\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n return optimizer", "def update_learning_rate(self):\n for scheduler in self.schedulers:\n if self.opt.lr_policy == 'plateau':\n scheduler.step(self.metric)\n else:\n scheduler.step()\n\n lr = self.optimizers[0].param_groups[0]['lr']\n print('learning rate = %.7f' % lr)", "def adjust_learning_rate(optimizer, epoch, lr_min, init_lr, lr_drop):\n lr = init_lr * (0.4 ** (epoch // lr_drop))\n if lr < lr_min:\n lr = lr_min\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n return optimizer, lr", "def init_optimizer(self, state_dict=None, use_gpu=True):\r\n param_optimizer = list(self.network.named_parameters())\r\n\r\n # There seems to be something that we can't\r\n param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]]\r\n\r\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\r\n optimizer_grouped_parameters = [\r\n {'params': [p for n, p in param_optimizer\r\n if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\r\n {'params': [p for n, p in param_optimizer\r\n if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}]\r\n\r\n num_train_optimization_steps = int(\r\n self.args.num_train_examples / self.args.batch_size / self.args.gradient_accumulation_steps) \\\r\n * self.args.num_epochs\r\n\r\n self.optimizer = AdamW(optimizer_grouped_parameters,\r\n lr=self.args.learning_rate)\r\n self.scheduler = WarmupLinearSchedule(self.optimizer,\r\n warmup_steps=self.args.warmup_steps,\r\n t_total=num_train_optimization_steps)\r\n\r\n if state_dict is not None:\r\n self.optimizer.load_state_dict(state_dict)\r\n # FIXME: temp soln - https://github.com/pytorch/pytorch/issues/2830\r\n if use_gpu:\r\n for state in self.optimizer.state.values():\r\n for k, v in state.items():\r\n if isinstance(v, torch.Tensor):\r\n state[k] = v.cuda()", "def create_custom_optimizer(tvars, loss, bert_init_lr, task_init_lr, num_train_steps, num_warmup_steps, use_tpu, global_step=None, freeze=-1, task_opt='adam', eps=1e-6):\n if global_step is None:\n global_step = tf.train.get_or_create_global_step()\n\n bert_learning_rate = tf.constant(value=bert_init_lr, shape=[], dtype=tf.float32)\n task_learning_rate = tf.constant(value=task_init_lr, shape=[], dtype=tf.float32)\n\n # Implements linear decay of the learning rate.\n bert_learning_rate = tf.train.polynomial_decay(\n bert_learning_rate,\n global_step,\n num_train_steps,\n end_learning_rate=0.0,\n power=1.0,\n cycle=False)\n task_learning_rate = tf.train.polynomial_decay(\n task_learning_rate,\n global_step,\n num_train_steps,\n end_learning_rate=0.0,\n power=1.0,\n cycle=False)\n\n ## TODO DEBUG\n print(f'type task learning rate: {type(task_learning_rate)} ##################')\n\n\n # Implements linear warmup. 
I.e., if global_step < num_warmup_steps, the\n # learning rate will be `global_step/num_warmup_steps * init_lr`.\n if num_warmup_steps:\n global_steps_int = tf.cast(global_step, tf.int32)\n warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)\n\n global_steps_float = tf.cast(global_steps_int, tf.float32)\n warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)\n\n warmup_percent_done = global_steps_float / warmup_steps_float\n bert_warmup_learning_rate = bert_init_lr * warmup_percent_done\n task_warmup_learning_rate = task_init_lr * warmup_percent_done\n\n is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)\n bert_learning_rate = (\n (1.0 - is_warmup) * bert_learning_rate + is_warmup * bert_warmup_learning_rate)\n\n # It is recommended that you use this optimizer for fine tuning, since this\n # is how the model was trained (note that the Adam m/v variables are NOT\n # loaded from init_checkpoint.)\n bert_optimizer = AdamWeightDecayOptimizer(\n learning_rate=bert_learning_rate,\n weight_decay_rate=0.01,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=eps,\n exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"bias\"])\n if task_opt == 'adam_weight_decay':\n task_optimizer = AdamWeightDecayOptimizer(\n learning_rate=task_learning_rate,\n weight_decay_rate=0.01,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=eps\n )\n elif task_opt == 'adam':\n task_optimizer = tf.train.AdamOptimizer(\n learning_rate=task_learning_rate)\n else:\n raise NotImplementedError('Check optimizer. {} is invalid.'.format(task_opt))\n\n # tvars = tf.trainable_variables()\n bert_vars, task_vars = [], []\n for var in tvars:\n if var.name.startswith('bert'):\n can_optimize = False\n if var.name.startswith('bert/encoder/layer_') and int(var.name.split('/')[2][len('layer_'):]) >= freeze:\n can_optimize = True\n if freeze == -1 or can_optimize:\n bert_vars.append(var)\n else:\n task_vars.append(var)\n print('bert:task', len(bert_vars), len(task_vars))\n grads = tf.gradients(loss, bert_vars + task_vars)\n bert_grads = grads[:len(bert_vars)]\n task_grads = grads[len(bert_vars):]\n\n # This is how the model was pre-trained.\n (bert_grads, _) = tf.clip_by_global_norm(bert_grads, clip_norm=1.0)\n (task_grads, _) = tf.clip_by_global_norm(task_grads, clip_norm=1.0)\n\n # global_step1 = tf.Print(global_step, [global_step], 'before')\n bert_train_op = bert_optimizer.apply_gradients(\n zip(bert_grads, bert_vars), global_step=global_step)\n task_train_op = task_optimizer.apply_gradients(\n zip(task_grads, task_vars), global_step=global_step)\n if task_opt == 'adam_weight_decay':\n new_global_step = global_step + 1\n train_op = tf.group(bert_train_op, task_train_op, [global_step.assign(new_global_step)])\n else:\n train_op = tf.group(bert_train_op, task_train_op)\n return train_op", "def __init__(self, epochs=1, opt_fn=optim.SGD, lr=1e-2, lr_decay=DecayType.NO, momentum=0.9,\n momentum_decay=DecayType.NO, beta=None, wds=None, wd_loss=True):\n self.epochs, self.opt_fn, self.lr, self.momentum, self.beta, self.wds = epochs, opt_fn, lr, momentum, beta, wds\n if isinstance(lr_decay,tuple): self.lr_decay, self.extra_lr = lr_decay\n else: self.lr_decay, self.extra_lr = lr_decay, None\n if isinstance(momentum_decay,tuple): self.mom_decay, self.extra_mom = momentum_decay\n else: self.mom_decay, self.extra_mom = momentum_decay, None\n self.wd_loss = wd_loss", "def adjust_learning_rate(optimizer, epoch, args):\n lr = args.lr\n if args.cos: # cosine lr schedule\n lr *= 0.5 * (1. 
+ math.cos(math.pi * epoch / args.epochs))\n else: # stepwise lr schedule\n for milestone in args.schedule:\n lr *= 0.1 if epoch >= milestone else 1.\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def _initScheduler(self) -> torch.optim.lr_scheduler.ReduceLROnPlateau:\n\n return torch.optim.lr_scheduler.ReduceLROnPlateau(\n self.optimizer, \n mode=cfg.training.scheduler_mode,\n factor=cfg.training.scheduler_factor,\n patience=cfg.training.scheduler_patience,\n threshold=cfg.training.scheduler_threshold\n )", "def adjust_learning_rate(self, optimizer, epoch, args):\n lr = args.learning_rate * (0.1 ** (epoch // 30))\n # print(lr)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate_and_learning_taks(optimizer, epoch, args):\n if epoch >= args.step2: \n lr = args.lr * 0.01\n elif epoch >= args.step1:\n lr = args.lr * 0.1\n else:\n lr = args.lr\n \n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n # Return training classes\n return range(len(args.dataset))", "def _adjust_learning_rate_resnet(optimizer, epoch):\n\n if epoch == 90:\n return lr_scheduler.MultiStepLR(optimizer, [30, 60, 80])\n elif epoch == 270: # autoaugment\n return lr_scheduler.MultiStepLR(optimizer, [90, 180, 240])\n else:\n raise ValueError('invalid epoch=%d for resnet scheduler' % epoch)", "def init_optimizer(network, config):\n # define optimizer and loss\n if config.optimizer == 'adadelta':\n opt = torch.optim.Adadelta(network.parameters(), lr=config.lr, weight_decay=config.weight_decay)\n elif config.optimizer == 'adam':\n opt = torch.optim.Adam(network.parameters(), lr=config.lr, weight_decay=config.weight_decay)\n elif config.optimizer == 'rmsprop':\n opt = torch.optim.RMSprop(network.parameters(), lr=config.lr, weight_decay=config.weight_decay)\n return opt", "def _create_learning_rate_scheduler(learning_rate_config, optimizer, last_step=-1):\n lr_scheduler = None\n learning_rate_type = learning_rate_config.name \n if learning_rate_type == 'constant_learning_rate': \n\n lr_scheduler = learning_schedules.Constant(\n optimizer, last_step=last_step)\n\n if learning_rate_type == 'exponential_decay_learning_rate':\n config = learning_rate_config \n lr_scheduler = learning_schedules.ExponentialDecay(\n optimizer, config.decay_steps, \n config.decay_factor, config.staircase, last_step=last_step)\n\n if learning_rate_type == 'manual_step_learning_rate':\n config = learning_rate_config\n if not config.schedule:\n raise ValueError('Empty learning rate schedule.')\n learning_rate_step_boundaries = [x.step for x in config.schedule]\n learning_rate_sequence = [config.initial_learning_rate]\n learning_rate_sequence += [x.learning_rate for x in config.schedule]\n lr_scheduler = learning_schedules.ManualStepping(\n optimizer, learning_rate_step_boundaries, learning_rate_sequence, \n last_step=last_step)\n\n if learning_rate_type == 'cosine_decay_learning_rate':\n config = learning_rate_config.cosine_decay_learning_rate\n lr_scheduler = learning_schedules.CosineDecayWithWarmup(\n optimizer, config.total_steps, \n config.warmup_learning_rate, config.warmup_steps, \n last_step=last_step)\n\n if lr_scheduler is None:\n raise ValueError('Learning_rate %s not supported.' 
% learning_rate_type)\n\n return lr_scheduler", "def adjust_learning_rate(self, optimizer, epoch, initial_lr, writer=None):\n lr = initial_lr * (0.98 ** epoch)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n if writer:\n writer.add_scalar(\"lr_G\", lr, epoch + 1)", "def _cook_optimizer(self, \n lr = 0.01, \n optimizer = 'sgd',\n l1_coeff = 0.00001,\n l2_coeff = 0.00001):\n with tf.variable_scope (self.name + '_train') as scope:\n apply_regularizer (name = self.name, var_list = tf.get_collection(\n self.name + '_regularizer_worthy_params'), \n l1_coeff = l1_coeff,\n l2_coeff = l2_coeff )\n self.obj = tf.add_n(tf.get_collection( self.name + '_objectives'), name='objective')\n tf.summary.scalar('total_objective', self.obj)\n\n # Change (supply as arguments) parameters here directly in the code.\n if optimizer == 'sgd': \n self.back_prop = apply_gradient_descent(var_list = tf.get_collection(\n self.name + '_trainable_params'),\n obj = self.obj, learning_rate = lr )\n elif optimizer == 'adagrad': \n self.back_prop = apply_adagrad(var_list = tf.get_collection(\n self.name + '_trainable_params'),\n obj = self.obj, learning_rate = lr ) \n elif optimizer == 'rmsprop':\n self.back_prop = apply_rmsprop(var_list = tf.get_collection(\n self.name + '_trainable_params') ,\n obj = self.obj, learning_rate = lr)\n elif optimizer == 'adam':\n self.back_prop = apply_adam (var_list = tf.get_collection(\n self.name + '_trainable_params') ,\n obj = self.obj, learning_rate = lr )\n else:\n raise Error('Invalid entry to optimizer')", "def adjust_learning_rate(optimizer, epoch, args):\n lr = args.lr\n if 20 < epoch <= 30:\n lr = 0.0001\n elif 30 < epoch :\n lr = 0.00001\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n print(\"learning rate -> {}\\n\".format(lr))", "def initialize_optimization(self):\n\n if self.FLAGS.optimizer == \"Adam\" :\n self.solver = tf.train.AdamOptimizer(\n learning_rate = self.learning_rate,\n beta1 = self.FLAGS.beta1,\n beta2 = self.FLAGS.beta2)\n else:\n print(\"ERROR: Cannot handle optimizer type {}!!!\".format(self.FLAGS.optimizer))\n raise RuntimeError\n \n # batch normalization in tensorflow requires this extra dependency\n # this is required to update the moving mean and moving variance variables\n extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(extra_update_ops):\n self.update = self.solver.minimize(self.loss, global_step=self.global_step)", "def add_optimizer(self):\n \n with tf.variable_scope(\"optimizer\"):\n\n # Define optimizer and minimize loss\n if self.OPTIM == \"RMSProp\":\n self.optimizer = tf.train.RMSPropOptimizer(self.LEARN_RATE).\\\n minimize(self.cost)\n \n elif self.OPTIM == \"GD\":\n self.optimizer = tf.train.GradientDescentOptimizer(self.LEARN_RATE).\\\n minimize(self.cost)\n \n elif self.OPTIM == \"Adam\":\n self.optimizer = tf.train.AdamOptimizer(self.LEARN_RATE).\\\n minimize(self.cost)\n\n # Merge all summaries for tensorboard\n #self.tbsummaries = tf.summary.merge_all()", "def init_optimizer(model, config, exact_layers=None):\n opt_type = config.optimizer\n if exact_layers:\n logger.info('Learning exact layers, number=%d', len(exact_layers))\n parameters = []\n for i, layer in enumerate(exact_layers):\n if isinstance(layer, tuple) and len(layer) == 2:\n layer, multiplier = layer\n init_multiplier = 1\n elif isinstance(layer, tuple) and len(layer) == 3:\n layer, init_multiplier, multiplier = layer\n else:\n multiplier = 1\n init_multiplier = 1\n lr = config.lr * 
multiplier\n init_lr = config.lr * multiplier * init_multiplier\n logger.info('Layer=%d, lr=%.5f', i, init_lr)\n parameters.append({'params': layer.parameters(), 'lr': init_lr, 'after_warmup_lr': lr})\n else:\n logger.info('Optimizing all parameters, lr=%.5f', config.lr)\n parameters = model.parameters()\n\n if opt_type == 'sgd':\n optimizer = torch.optim.SGD(parameters, config.lr, momentum=config.momentum, weight_decay=config.weight_decay)\n elif opt_type == 'adam':\n optimizer = torch.optim.Adam(parameters, lr=config.lr, weight_decay=config.weight_decay)\n elif opt_type == 'yf':\n optimizer = YFOptimizer(parameters, config.lr, mu=config.momentum, weight_decay=config.weight_decay,\n clip_thresh=0.1)\n else:\n raise TypeError, 'Unknown optimizer type=%s' % (opt_type, )\n return optimizer", "def __init__(self, state_dim, action_dim, learning_rate, weight_decay):\n self.dynamics_net = ForwardModel(state_dim, action_dim)\n self.rewards_net = RewardModel(state_dim, action_dim)\n self.done_net = RewardModel(state_dim, action_dim)\n\n self.dyn_optimizer = tfa_optimizers.AdamW(\n learning_rate=learning_rate, weight_decay=weight_decay)\n self.reward_optimizer = tfa_optimizers.AdamW(\n learning_rate=learning_rate, weight_decay=weight_decay)\n self.done_optimizer = tfa_optimizers.AdamW(\n learning_rate=learning_rate, weight_decay=weight_decay)" ]
[ "0.7660184", "0.7521174", "0.7476974", "0.7435311", "0.7368797", "0.72984624", "0.72553366", "0.7180905", "0.715223", "0.7137306", "0.7121896", "0.71121275", "0.70854396", "0.7073759", "0.70551753", "0.700759", "0.70071536", "0.69621265", "0.69621265", "0.69529146", "0.69134176", "0.6907368", "0.6895629", "0.68793505", "0.6859542", "0.6838025", "0.6838025", "0.6833091", "0.68052894", "0.6797865", "0.6783162", "0.6736418", "0.67238736", "0.67220557", "0.67179507", "0.6706665", "0.6704935", "0.6699889", "0.6693561", "0.66608876", "0.665406", "0.66427964", "0.66378534", "0.66357654", "0.66281796", "0.6614416", "0.66140354", "0.66119593", "0.66114956", "0.659544", "0.65869844", "0.658171", "0.6579899", "0.6571118", "0.6551252", "0.65267086", "0.6519587", "0.65126204", "0.651059", "0.64980584", "0.6493374", "0.6492181", "0.6488689", "0.6481246", "0.6481246", "0.6469695", "0.64676476", "0.64647585", "0.6462166", "0.6443182", "0.6438191", "0.6438191", "0.64221996", "0.6414663", "0.6408309", "0.64041996", "0.6394961", "0.6389093", "0.63857895", "0.63803136", "0.6380128", "0.6362866", "0.6352333", "0.63510513", "0.6347871", "0.63308483", "0.63252383", "0.63252354", "0.6319231", "0.63143855", "0.63133895", "0.6310742", "0.62757975", "0.6273192", "0.6260281", "0.62519294", "0.625091", "0.6248867", "0.624731", "0.6241912" ]
0.6385241
79
Close the session and the Thrift transport.
def close(self):
    # PEP 249
    rpc.close_service(self.service)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close(self):\n self.__session.close()", "def close(self):\n self.__session.close()", "def close(self):\n self.__session.close()", "def close(self):\n self.__session.close()", "def close(self):\n self.__session.close()", "def close(self):\n self.__session.close()", "def close(self) -> None:\n\n self._session.close()", "def close(self) -> None:\n if self.client is None:\n return\n\n if logger is not None:\n # If called from __del__(), module variables may no longer exist.\n logger.debug(f\"Closing Thrift transport to {self.host}:{self.port}\")\n\n self.client.close()\n self.client = None", "def close(self):\n self.session.close()", "def close(self):\n self.session.close()\n self.session = None", "def close(self):\n\t\tself._client.close_session()", "def close_session(self):\n if not self.is_open():\n return\n\n try:\n # Fetch any active result\n self.fetch_active_result()\n # Deallocate all prepared statements\n if self._prepared_stmt_supported:\n for stmt_id in self._prepared_stmt_ids:\n self.protocol.send_prepare_deallocate(stmt_id)\n self._stmt_counter = 0\n # Send session close\n self.protocol.send_close()\n self.protocol.read_ok()\n except (InterfaceError, OperationalError, OSError) as err:\n _LOGGER.warning(\n \"Warning: An error occurred while attempting to close the \"\n \"connection: %s\",\n err,\n )\n finally:\n # The remote connection with the server has been lost,\n # close the connection locally.\n self.stream.close()", "async def close(self):\n await self._http_session.close()", "def close_session(self):\n self.sess.close()", "async def close_session(self):\n await self._client_session.close()", "def close(self):\n self.session.close(SessionCloseErrorCode.SESSION_DIED)", "async def close(self) -> None:\n if self._session and self._close_session:\n await self._session.close()", "def close_connection(self):\n\t\tself.session.close()", "def close_connection(self):\n self.session.close()", "async def close(self):\n try:\n self.logger.debug(\"Closing session\")\n if self.key in self._ALL_SESSIONS:\n del self._ALL_SESSIONS[self.key]\n finally:\n self.inc_counter(\"%s.closed\" % self.objname)\n await self._close()\n if self._cmd_stream is not None:\n self._cmd_stream.close()\n self._connected = False", "def close(self):\n yield from self.session.close()", "def _close(self):\n if self.__session is not None:\n self._rollback()\n self.__session.close()", "def close(self):\n self.password = None\n self.session.close()", "def close(self):\n self._client.close()\n self._transport.close()", "def close_session(self):\n self.session.execute(QUERY_DROP_TABLE_1)\n self.session.execute(QUERY_DROP_TABLE_2)\n self.session.execute(QUERY_DROP_TABLE_3)\n self.session.shutdown()\n self.cluster.shutdown()", "def close(self):\n self.__session.remove()", "def close(self):\n self.__session.remove()", "def close(self):\n self.__session.remove()", "def close(self) -> None:\n self.transport.close()", "def SCPI_sock_close(session):\n \n session.close()", "async def close(self):\n # This essensially stops self.session from running\n if self.session is not None:\n # Close the session, or aiohttp complains\n await self.session.close()\n # Set our session back to its default value\n self.session = None", "def close(self):\n self.sess.close()\n print(\"Current session closed!\")", "async def close(self):\n\n if self.closed:\n return\n\n await super().close()\n\n if self.socket:\n await self.socket.close()\n\n await self._session.close()", "def close(self):\n if self._tr is not None:\n self._tr.close()", "def 
close(self):\n\n if self._session:\n sid = id(self._session)\n self._session.close()\n del self._session\n self._session = None\n\n if logging:\n msg = u'Internal requests Session instance 0x{0:x} closed and cleaned up'\n utils.log_with_debug_info(logging.DEBUG, msg.format(sid))", "def close(self):\n if self._chan is not None:\n self._chan.close()\n if self._session is not None:\n self._session.close()", "def __del__(self):\n\n self.session.close()", "def close_db_session(self, session):\r\n session.close()", "def close(self):\n self.sess.close()", "def _close_http_session(self):\n if self._api_http_session:\n try:\n self._api_http_session.close()\n except RequestException:\n pass", "def close(self):\n if self.transport.isOpen():\n self.transport.close()", "def close(self):\n _LOGGER.debug(\"Closing websocket connections\")\n if self._remote_task:\n self._remote_task.cancel()\n if self._control_task:\n self._control_task.cancel()\n if self._app_monitor_task:\n self._app_monitor_task.cancel()\n if self._managed_session:\n _LOGGER.debug(\"Closing managed ClientSession\")\n self._loop.create_task(self.session.close())", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.session.close()", "def close(self):\n self._connection.close()", "def close(self):\n asyncio.ensure_future(self.__inner_protocol.release())", "def close(self):\n with self._transport_lock:\n self._transport.close()\n\n with self._store_lock:\n self._packet_store.clear_all()", "def close(self):\n # XXX should be invoking a method in self.methods?\n if self.debug:\n logger.debug(\"%s: Closing.\", str(self))\n\n # Cleanup any locks\n locked = self.server.unlock_target_any(self)\n method = getattr(self.methods, \"rpc_unlock\", None)\n if method is not None:\n try:\n # Let the user know.\n for target in locked:\n method(self, None, target)\n except Exception as ex:\n if self.debug:\n logger.debug(\"%s: Ignoring exception in rpc_unlock during close: %s\", str(self),\n str(ex))\n try:\n super(NetconfServerSession, self).close()\n except EOFError:\n if self.debug:\n logger.debug(\"%s: EOF error while closing\", str(self))\n\n if self.debug:\n logger.debug(\"%s: Closed.\", str(self))", "def release(self):\n\n if self.session is not None:\n self.session.close()", "def close(self):\n\n\t\tif self._session:\n\t\t\tsid = id(self._session)\n\t\t\tself._session.close()\n\t\t\tdel self._session\n\t\t\tself._session = None\n\n\t\t\tmsg = u'Internal requests Session instance 0x{0:x} closed and cleaned up'\n\t\t\tlog_with_debug_info(logging.DEBUG, msg.format(sid))", "def shutdown(self):\n ts.client.transport.close()", "def close_connection(self):\n super().close_session()", "def __del__(self):\n try:\n self.api.transport.session.close()\n except Exception as e:\n log.debug(f\"Failed to close VSO API connection with: {e}\")", "def close(self):\n with self.connlock:\n self.conn.do_close()", "async def close(self) -> None:\n\n # for conn_handle in self._conn_handles:\n # await agent.agent_close_connection(conn_handle)\n # self._conn_handles.clear()\n await wallet.close_wallet(self.wallet_handle)", "def close(self):\n self.context['socket'].close()", "def close(self):\n\n self.conn.close()", "def close(self):\n self._conn.close()", "async def close(self) -> None:\n await self._ws.close()\n await self._session.close()\n await sleep(0.25)", "def close_session(self, message):\n pass", "def close(self):\r\n # close the connection and the socket\r\n self.conn.close()\r\n self.theSocket.close()", "def __del__(self):\n self._close_http_session()", "def 
closeConnection(self):\n self.engine.close()", "def close(self):\n self.conn.close()", "def close(self):\n self.conn.close()", "def close(self):\n self.conn.close()", "async def async_close_session(self) -> None:\n if not self.token:\n return\n\n await self._async_ws_set_function(CMD_LOGOUT, {})\n self.token = None", "def close(self):\n self._update_cond()\n pn_connection_close(self._impl)\n if hasattr(self, '_session_policy'):\n # break circular ref\n del self._session_policy", "def close(self):\n self.connection.close()", "def close(self):\n self.connection.close()", "def close(self):\n self.connection.close()", "def close(self):\n self.connection.close()", "def close(self):\n self.connection.close()", "def close(self):\n self.connection.close()", "def close(self):\n self.connection.close()", "def close(self) -> None:\n # Make sure we don't leak connections after finising insertion.\n if self.__batch:\n raise Exception('Logic error, opened a batch without closing!')\n if self.__session is not None:\n self.__session.close()\n if self.__conn is not None:\n self.__conn.close()\n self.__conn = None\n if self.__engine is not None:\n self.__engine.dispose()\n self.__engine = None", "def close(self) -> None:\n self._connection.close()", "async def close_connection(self):\n await self.websession.close()", "def close(self):\n self.conn.close()\n self.destroy()", "def close(self):\n self.__connection.close()", "def close(self):\r\n if self._session:\r\n self._session.close()\r\n self._session = None\r\n try:\r\n self._writer.remove_file()\r\n self._reader.remove_file()\r\n except Oct2PyError:\r\n pass", "def close(self):\n if self.sftp_open:\n self.sftp.close()\n self.sftp_open = False\n self.transport.close()", "def close(self): \n self.connection.close()", "def close(self):\n\n\t\t# close connection\n\t\tself.eyetribe.close()\n\t\tself.connected = False", "def close_connection(self):\n\n try:\n requests.delete(\n f\"https://{self.hostname}:{self.port}/{self._ENDPOINTS['tokens']}/{self.session_id}\",\n headers=self._headers,\n verify=False\n )\n\n del SessionManager._SESSIONS[self.__connection_hash]\n\n except requests.exceptions.RequestException as error:\n logging.exception(error)\n raise ConnectionError(\"Error closing connection\")", "def db_close_conn(tunnel, engine):\n engine.dispose()\n tunnel.close()", "def Close(self):\n self._sock.close()", "def close(self) -> None:\n self.connection.close()", "def close(self):\n self._sock.close()\n self._sock = None", "async def close(self):\n await self._connection.close()", "def close(self):\n self.closeFileWriter()\n try: self._sess.close()\n except: pass # Is already closed.", "async def end_session(self):\n\t\t...", "async def close(self) -> None:\n logger.debug(\n f\"['{self._instance_connection_string}']: Waiting for _current to be cancelled\"\n )\n self._current.cancel()\n logger.debug(\n f\"['{self._instance_connection_string}']: Waiting for _next to be cancelled\"\n )\n self._next.cancel()\n logger.debug(\n f\"['{self._instance_connection_string}']: Waiting for _client_session to close\"\n )\n await self._client_session.close()", "def closeConnecetion(self):\n client.close()", "def close(self):\n self.control_conn.sendall('CLOSE'.encode())", "def close(self):\n # There isnt active session\n if self._key is None:\n LOG.debug('There isnt active session - skipping session close.')\n return\n\n # Try to close active session\n path = 'credentials/' + self._key\n try:\n self.delete(path)\n except Exception as error:\n LOG.warning('Cannot close 
StoreServ 3PAR session '\n 'gracefully. Exception occured: %s',\n repr(error))\n else:\n self._headers.pop('X-HP3PAR-WSAPI-SessionKey')\n self._key = None", "def close(self):\n try:\n self.connection.Close()\n del self.connection\n except:\n pass", "def close(self):\n self._s.close()", "def close(self):\n for session in self.sessions.copy().values():\n session.close(SessionCloseErrorCode.SESSION_DIED)", "def close(self):\n try:\n self.tunnel.close()\n self.transport.close()\n except Exception as exp:\n logging.error('Unable to close the device handle: %s' % exp)\n raise TobyException('Unable to close the device handle: %s' % exp)\n return True", "def close_session(self):\n try:\n if self.get_client():\n self.get_client().close()\n Session.SESSIONS_COUNT -= 1\n return True\n else:\n return False\n except error as e:\n print(\"closing session error\")\n self._error = e\n return False", "def close(self) -> None:\n self.real_conn.close()" ]
[ "0.78195184", "0.78195184", "0.78195184", "0.78195184", "0.78195184", "0.78195184", "0.7808701", "0.7711172", "0.76763135", "0.7660848", "0.7508555", "0.7420375", "0.7418072", "0.7413811", "0.7404994", "0.732449", "0.7322094", "0.722286", "0.7220323", "0.71922815", "0.712453", "0.70434505", "0.7033646", "0.7023439", "0.6988567", "0.698168", "0.698168", "0.698168", "0.6968098", "0.6950131", "0.6941707", "0.69316626", "0.6912722", "0.68828285", "0.67845017", "0.6782111", "0.67433584", "0.67149454", "0.6704356", "0.66989946", "0.6643511", "0.66415536", "0.6636442", "0.6630582", "0.66096014", "0.6602376", "0.65993255", "0.6576401", "0.6569639", "0.65691537", "0.6540773", "0.65333754", "0.6532726", "0.65302664", "0.652597", "0.65203166", "0.6519247", "0.6500017", "0.64804775", "0.64727306", "0.64535", "0.6450025", "0.6427019", "0.6427019", "0.6427019", "0.6412418", "0.6408747", "0.63913816", "0.63913816", "0.63913816", "0.63913816", "0.63913816", "0.63913816", "0.63913816", "0.6382441", "0.6370632", "0.63642895", "0.63591355", "0.635869", "0.63499945", "0.63403875", "0.633626", "0.6334513", "0.63319474", "0.6319321", "0.63160974", "0.63102645", "0.62938523", "0.62895465", "0.6287521", "0.6286668", "0.62553114", "0.625099", "0.62503445", "0.62487555", "0.62464005", "0.62453634", "0.62416494", "0.62408006", "0.6227297", "0.62229776" ]
0.0
-1
Impala doesn't support transactions; does nothing.
def commit(self):
    # PEP 249
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def supports_transactions(self):\n return self._mysql_storage_engine != \"MyISAM\"", "def runSqlNoTransaction(self, sql):\r\n self.c.autocommit = True\r\n cursor = self.c.cursor()\r\n cursor.execute(sql)\r\n self.c.commit()\r\n cursor.close()\r\n self.c.autocommit = False\r\n return True", "def _do_commit(self):", "def begin_transaction(self):\r\n self.db.commit(False)", "def commit(self, transaction):\n raise NotImplementedError", "def start_transaction(self):\n self._db.autocommit = False", "def test_rollback(self, local_connection):\n\n users = self.tables.users\n connection = local_connection\n transaction = connection.begin()\n connection.execute(users.insert(), dict(user_id=1, user_name=\"user1\"))\n connection.execute(users.insert(), dict(user_id=2, user_name=\"user2\"))\n connection.execute(users.insert(), dict(user_id=3, user_name=\"user3\"))\n transaction.rollback()\n result = connection.exec_driver_sql(\"select * from users\")\n assert len(result.fetchall()) == 0", "def start_transaction(self):\n self._connection.execute_nonquery(\"sql\", \"START TRANSACTION\", True)", "def with_transaction(session, f):\n try:\n f(session)\n session.commit()\n except Exception as e:\n session.rollback()\n raise e", "def commit_transaction(self) -> None:\n pass", "def custom_statement(statement):\n try:\n conn = _access()\n cur = conn.cursor()\n conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\n cur.execute(\"{}\".format(statement))\n conn.close()\n return True\n except IntegrityError, ProgrammingError:\n return False", "def start_transaction_sql(self):\n return \"BEGIN TRANSACTION\"", "def start_transaction_sql(self):\n return 'START TRANSACTION;'", "def test_execute_transaction(data_manager):\n class NotFred(Exception):\n pass\n\n try:\n with data_manager.dal():\n session = current_context.sqlalchemy\n metadata.create_all(session.bind)\n \n session.execute(text('INSERT INTO test (name) VALUES (:name)'), {'name': 'Fred'})\n record = session.execute(text('SELECT * FROM test')).first()\n assert record.name == 'Fred'\n\n # Now error out to remove \"Fred\"\n raise NotFred('Do not like Fred')\n except NotFred:\n pass\n\n with data_manager.dal():\n session = current_context.sqlalchemy\n record = session.execute(text('SELECT * FROM test')).first()\n # Fred should have been rolled back\n assert not record", "def transactionEnd(self):\n self.connection.commit()", "def commit_query(conn):\r\n conn.commit()", "def maybe_commit(job):", "def start_transaction(self) -> None:\n pass", "def transaction(self, context: InjectionContext = None) -> \"ProfileSession\":", "def prepare_for_commit(self):", "def dbtransaction(request):\n connection = engine.connect()\n transaction = connection.begin()\n\n def teardown():\n # Explicitly remove the session so that we'll get a new session every time we go here.\n transaction.rollback()\n connection.close()\n\n request.addfinalizer(teardown)\n return connection", "def manual_transaction():\r\n try:\r\n yield\r\n except Exception:\r\n transaction.rollback()\r\n log.exception('Due to an error, this transaction has been rolled back')\r\n raise\r\n else:\r\n transaction.commit()", "def supports_transactions(self):\n return False", "def _commit(self):\n if self.__session is not None:\n self.__session.commit()", "def abort_transaction(self) -> None:\n pass", "def txn(db):\n with db.atomic() as txn:\n yield\n txn.rollback()", "def transaction_failed(self):", "async def test_transaction_commit_low_level(database_url):\n\n async with Database(database_url) as database:\n 
async with database.transaction(force_rollback=True):\n transaction = await database.transaction()\n try:\n query = notes.insert().values(text=\"example1\", completed=True)\n await database.execute(query)\n except: # pragma: no cover\n await transaction.rollback()\n else:\n await transaction.commit()\n\n query = notes.select()\n results = await database.fetch_all(query=query)\n assert len(results) == 1", "async def test_transaction_rollback_low_level(database_url):\n\n async with Database(database_url) as database:\n async with database.transaction(force_rollback=True):\n transaction = await database.transaction()\n try:\n query = notes.insert().values(text=\"example1\", completed=True)\n await database.execute(query)\n raise RuntimeError()\n except:\n await transaction.rollback()\n else: # pragma: no cover\n await transaction.commit()\n\n query = notes.select()\n results = await database.fetch_all(query=query)\n assert len(results) == 0", "def startTransaction(self) -> int:\n ...", "def _end_transaction(self):\n self._db.autocommit=True", "def cypher_transaction():\n session = cypher.Session(HOST)\n return session.create_transaction()", "def commit(self):", "def BeginTransaction(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def rollback(self):\n # PEP 249\n raise impala.error.NotSupportedError()", "def __commit(self):\n from sqlalchemy.exc import IntegrityError\n\n try:\n db.session.commit()\n except IntegrityError:\n db.session.rollback()", "async def test_transaction_rollback(database_url):\n\n async with Database(database_url) as database:\n async with database.transaction(force_rollback=True):\n try:\n async with database.transaction():\n query = notes.insert().values(text=\"example1\", completed=True)\n await database.execute(query)\n raise RuntimeError()\n except RuntimeError:\n pass\n\n query = notes.select()\n results = await database.fetch_all(query=query)\n assert len(results) == 0", "def commit(self):\n self.sql_session.commit()", "def test_auto_rollback(self):\n self.mocked_cursor.execute.side_effect = psycopg2.Error('testing')\n\n db = database.Database()\n try:\n db.execute(sql=\"SELECT * from FOO WHERE bar LIKE 'baz'\")\n except database.DatabaseError:\n pass\n\n self.assertEqual(self.mocked_connection.rollback.call_count, 1)", "def start_transaction(self):\n raise Unsupported()", "def _do_rollback(self):\n self.backend.rollback()", "def commit(self):\n self._connection.execute_nonquery(\"sql\", \"COMMIT\", True)", "def RollBack(self):\r\n self.conn.rollback()", "def abort(self, transaction):\n raise NotImplementedError", "def commit(self):\n self.execute_sql(sql.commit)\n self.under_transaction = False", "async def transact(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:\n stats.inc('transactions', 'SQL')\n async with self.pool.acquire() as conn:\n async with conn.cursor() as cur:\n try:\n ret = await func(cur, *args, **kwargs)\n except:\n await conn.rollback()\n raise\n else:\n await conn.commit()\n return ret", "async def test_rollback_isolation(database_url):\n\n async with Database(database_url) as database:\n # Perform some INSERT operations on the database.\n async with database.transaction(force_rollback=True):\n query = notes.insert().values(text=\"example1\", completed=True)\n await database.execute(query)\n\n # Ensure INSERT operations have been rolled back.\n query = notes.select()\n results = await database.fetch_all(query=query)\n assert len(results) == 0", "def commit(self):\n if self.transaction:\n 
self.conn.commit()\n self.transaction = False", "def test_rollback():", "def __call__(self):\n session = self._session()\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()", "def create_transaction(conn,task):\r\n sql = ''' INSERT OR IGNORE INTO emp1(emp_id,location,login_time)\r\n VALUES(?,?,?) '''\r\n c = conn.cursor()\r\n c.execute(sql,task)\r\n conn.commit()", "def test_execute(data_manager):\n with data_manager.dal():\n session = current_context.sqlalchemy\n assert isinstance(session, orm.Session)\n\n metadata.create_all(session.bind)\n \n session.execute(text('INSERT INTO test (name) VALUES (:name)'), {'name': 'Fred'})\n record = session.execute(text('SELECT * FROM test')).first()\n assert record.name == 'Fred'\n\n # Make sure Fred still exists after transaction\n with data_manager.dal():\n session = current_context.sqlalchemy\n assert isinstance(session, orm.Session)\n\n record = session.execute(text('SELECT * FROM test')).first()\n assert record.name == 'Fred'", "def commit():\n get_db().commit()", "async def test_transaction_commit(database_url):\n async with Database(database_url) as database:\n async with database.transaction(force_rollback=True):\n async with database.transaction():\n query = notes.insert().values(text=\"example1\", completed=True)\n await database.execute(query)\n\n query = notes.select()\n results = await database.fetch_all(query=query)\n assert len(results) == 1", "def transaction():\n try:\n conn = connect_to_db()\n yield conn\n conn.commit()\n except Exception as e:\n print(e)\n conn.rollback()\n finally:\n conn.close()", "def transaction(fn):\n @wraps(fn)\n def transaction_inner(*args, **kwargs): #1\n start = time()\n stmp_id = id_gen()\n session = operation.session\n sessionid = id(session)\n \n # set distributed transaction id to 0 for single transaction\n try:\n operation.id\n except: \n operation.id = str(uuid4())\n \n try:\n # get runtime info\n cp = current_process()\n ct = current_thread() \n \n # format request params\n params = []\n for item in args:\n params.append(unicode(item))\n for k,v in kwargs.iteritems():\n params.append(u\"'%s':'%s'\" % (k, v))\n \n # call internal function\n res = fn(*args, **kwargs)\n \n session.commit()\n elapsed = round(time() - start, 4)\n logger.debug(u'%s.%s - %s - transaction - %s - %s - OK - %s' % (\n operation.id, stmp_id, sessionid, fn.__name__, \n params, elapsed))\n \n return res\n except ModelError as ex:\n elapsed = round(time() - start, 4)\n logger.error(u'%s.%s - %s - transaction - %s - %s - KO - %s' % (\n operation.id, stmp_id, sessionid, fn.__name__, \n params, elapsed))\n if ex.code not in [409]:\n #logger.error(ex.desc, exc_info=1)\n logger.error(ex.desc)\n \n session.rollback()\n raise TransactionError(ex.desc, code=ex.code)\n except IntegrityError as ex:\n elapsed = round(time() - start, 4)\n logger.error(u'%s.%s - %s - transaction - %s - %s - KO - %s' % (\n operation.id, stmp_id, sessionid, fn.__name__, \n params, elapsed))\n logger.error(ex.orig, exc_info=1)\n logger.error(ex.orig)\n\n session.rollback()\n raise TransactionError(ex.orig)\n except DBAPIError as ex:\n elapsed = round(time() - start, 4)\n logger.error(u'%s.%s - %s - transaction - %s - %s - KO - %s' % (\n operation.id, stmp_id, sessionid, fn.__name__, \n params, elapsed))\n #logger.error(ex.orig, exc_info=1)\n logger.error(ex.orig)\n \n session.rollback()\n raise TransactionError(ex.orig)\n \n except Exception as ex:\n elapsed = round(time() - start, 4)\n logger.error(u'%s.%s 
- %s - transaction - %s - %s - KO - %s' % (\n operation.id, stmp_id, sessionid, fn.__name__, \n params, elapsed))\n #logger.error(ex, exc_info=1)\n logger.error(ex)\n \n session.rollback()\n raise TransactionError(ex)\n\n return transaction_inner", "def test_commit(self):\n # TODO: Test errors while committing and recovery\n pass", "def rollback(self):\n self.conn.rollback()", "def end_transaction(self):\r\n self.db.commit(True)", "def rollback(self):\n self._connection.execute_nonquery(\"sql\", \"ROLLBACK\", True)", "def save_query(self):\r\n self.conn.commit()", "def _do_commit(self):\n self.backend.commit()", "async def test_transaction_decorator(database_url):\n async with Database(database_url, force_rollback=True) as database:\n\n @database.transaction()\n async def insert_data(raise_exception):\n query = notes.insert().values(text=\"example\", completed=True)\n await database.execute(query)\n if raise_exception:\n raise RuntimeError()\n\n with pytest.raises(RuntimeError):\n await insert_data(raise_exception=True)\n\n query = notes.select()\n results = await database.fetch_all(query=query)\n assert len(results) == 0\n\n await insert_data(raise_exception=False)\n\n query = notes.select()\n results = await database.fetch_all(query=query)\n assert len(results) == 1", "def transaction() -> Generator:\n session = current_session()\n logger.debug('transaction with session %s', id(session))\n try:\n yield session\n # Only commit if there are un-flushed changes. The caller may commit\n # explicitly, e.g. to do exception handling.\n if session.dirty or session.deleted or session.new:\n session.commit()\n logger.debug('committed!')\n except ClassicBaseException as e:\n logger.debug('Command failed, rolling back: %s', str(e))\n session.rollback()\n raise # Propagate exceptions raised from this module.\n except InvalidEvent:\n session.rollback()\n raise\n except Exception as e:\n logger.debug('Command failed, rolling back: %s', str(e))\n session.rollback()\n raise TransactionFailed('Failed to execute transaction') from e", "def commitToDatabase(self, tiltseriesdata):\n\t\treturn", "def transaction_failed_before_processing(self):", "async def test_commit_on_root_transaction(database_url):\n\n async with Database(database_url) as database:\n try:\n async with database.transaction():\n query = notes.insert().values(text=\"example1\", completed=True)\n await database.execute(query)\n\n query = notes.select()\n results = await database.fetch_all(query=query)\n assert len(results) == 1\n finally:\n query = notes.delete()\n await database.execute(query)", "def commit(self):\n pass", "def commit(self):\n pass", "def commit(self):\n pass", "def commit(self):\n pass", "def commit(self):\n pass", "def commit(self):\n curs = self.cursor()\n self.clearTempTables(curs)\n super(_MockConnection, self).commit()", "def Transaction(db):\n def wrapper(f):\n def transaction_wrapper(*args, **kwargs):\n tx = db.beginTx()\n \n try: args[0].transaction = tx\n except: pass\n \n result = f(*args, **kwargs)\n tx.success()\n tx.close()\n return result\n return transaction_wrapper\n return wrapper", "def database_commit(connector):\n try:\n connector.commit()\n except Exception as e:\n raise Exception(\n \"An error occurred while committing the modifications in the database: %s\"\n % e\n )", "def test_autocommit_off(self):\n res = self.dbh.run_autocommit(False)\n self.assertIsNone(res)", "def test_begin_transaction(self):\n # the utility we're testing here avoids setting the isolation level when this setting is True\n # 
because tests usually run within their own transaction. By the time the isolation level\n # is attempted to be set within a test, there have been reads and writes and the isolation\n # cannot be changed\n self.assertFalse(connection.in_atomic_block)\n with _begin_transaction(None, isolated=True):\n session = SyncSession.objects.create(\n id=uuid.uuid4().hex,\n profile=\"facilitydata\",\n last_activity_timestamp=timezone.now(),\n )\n transfer_session = TransferSession.objects.create(\n id=uuid.uuid4().hex,\n sync_session=session,\n push=True,\n last_activity_timestamp=timezone.now(),\n )\n create_buffer_and_store_dummy_data(transfer_session.id)\n\n # manual cleanup\n self.assertNotEqual(0, Store.objects.all().count())\n # will cascade delete\n SyncSession.objects.all().delete()\n Store.objects.all().delete()", "def test_flush_wrapper_plain_integrity_error(self):\n\n _session = self.sessionmaker()\n\n with _session.begin():\n foo = self.Foo(counter=1)\n _session.add(foo)\n\n _session.begin()\n self.addCleanup(_session.rollback)\n foo = self.Foo(counter=None)\n _session.add(foo)\n self.assertRaises(exception.DBError, _session.flush)", "def transact(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n with transaction() as conn:\n fn(conn, *args, **kwargs)\n return wrapper", "def test_no_rollback_in_committed_state(self):\n\n sess = fixture_session()\n\n def fail(*arg, **kw):\n raise BaseException(\"some base exception\")\n\n with mock.patch.object(\n testing.db.dialect, \"do_rollback\", side_effect=fail\n ) as fail_mock, mock.patch.object(\n testing.db.dialect,\n \"do_commit\",\n side_effect=testing.db.dialect.do_commit,\n ) as succeed_mock:\n # sess.begin() -> commit(). why would do_rollback() be called?\n # because of connection pool finalize_fairy *after* the commit.\n # this will cause the conn.close() in session.commit() to fail,\n # but after the DB commit succeeded.\n with expect_raises_message(BaseException, \"some base exception\"):\n with sess.begin():\n conn = sess.connection()\n fairy_conn = conn.connection\n\n eq_(succeed_mock.mock_calls, [mock.call(fairy_conn)])\n eq_(fail_mock.mock_calls, [mock.call(fairy_conn)])", "def run_transaction(self, op, max_retries=3):\n\n if Database.conn is None:\n raise TypeError(\"Connection should not be None. Did you run connect_to_db()?\")\n\n # leaving this block the transaction will commit or rollback\n # (if leaving with an exception)\n with Database.conn:\n for retry in range(1, max_retries + 1):\n try:\n result = op(Database.conn)\n\n # If we reach this point, we were able to commit, so we break\n # from the retry loop.\n return result\n\n except SerializationFailure as e:\n # This is a retry error, so we roll back the current\n # transaction and sleep for a bit before retrying. 
The\n # sleep time increases for each failed transaction.\n logging.debug(\"got error: %s\", e)\n Database.conn.rollback()\n logging.debug(\"EXECUTE SERIALIZATION_FAILURE BRANCH\")\n sleep_ms = (2 ** retry) * 0.1 * (random.random() + 0.5)\n logging.debug(\"Sleeping %s seconds\", sleep_ms)\n time.sleep(sleep_ms)\n\n except psycopg2.Error as e:\n logging.debug(\"got error: %s\", e)\n logging.debug(\"EXECUTE NON-SERIALIZATION_FAILURE BRANCH\")\n raise e\n\n raise ValueError(f\"Transaction did not succeed after {max_retries} retries\")", "def _check_transactional_ddl(self):\n table_name = \"yoyo_tmp_{}\".format(utils.get_random_string(10))\n table_name_quoted = self.quote_identifier(table_name)\n sql = self.create_test_table_sql.format(table_name_quoted=table_name_quoted)\n with self.transaction() as t:\n self.execute(sql)\n t.rollback()\n try:\n with self.transaction():\n self.execute(\"DROP TABLE {}\".format(table_name_quoted))\n except self.DatabaseError:\n return True\n return False", "def begin(self):\n self._in_transaction = True\n self.execute(\"BEGIN\")", "def func():\n try:\n session.add(point_table(id=5, ptest=(None, 1)))\n session.commit()\n session.close()\n except Exception as e:\n session.close()\n raise e", "def func():\n try:\n session.add(point_table(id=4, ptest=(1, None)))\n session.commit()\n session.close()\n except Exception as e:\n session.close()\n raise e", "def rollback(self):\n if self.dbmi.__name__ == \"psycopg2\":\n if self.connected:\n self.connection.rollback()", "def commit(self):\n try:\n db.session.commit()\n except:\n db.session.rollback()\n raise", "def test_invalid_table(self):\n self.execute_query_expect_failure(self.client, \"select * from functional.bad_serde\")\n # The table expires after 1 second. Sleeping for another logbufsecs=5 seconds to wait\n # for the log to be flushed. 
Wait 4 more seconds to reduce flakiness.\n time.sleep(10)\n assert \"Unexpected exception thrown while attempting to automatically invalidate \"\\\n \"tables\" not in open(os.path.join(self.impala_log_dir, \"catalogd.INFO\")).read()", "def commit(self):\n raise NotImplementedError", "def transaction(install_module):\n from trytond.transaction import Transaction\n from trytond.tests.test_tryton import USER, CONTEXT, DB_NAME\n\n with Transaction().start(DB_NAME, USER, context=CONTEXT) as transaction:\n yield transaction\n\n transaction.cursor.rollback()", "def commit(self):\n self.conn.commit()", "def commitQuery(self):\r\n\t\tself.session.commit()", "def commit():\n try:\n yield\n db.session.commit()\n except:\n print('Rolling back changes...')\n db.session.rollback()\n raise", "def transaction(session: Union[scoped_session, Session, \"SessionlessContext\"]):\n # temporary hack; need to fix access to scoped_session callable, not proxy\n if isinstance(session, scoped_session):\n session = session()\n # hack: this could be model.store.SessionlessContext; then we don't need to do anything\n elif not isinstance(session, Session):\n yield\n return # exit: can't use as a Session\n\n if not session.in_transaction():\n with session.begin():\n yield\n else:\n yield", "def transaction(self):\n return MySQLConnection.Transaction(self)", "def test_flush_wrapper_operational_error(self):\n\n _session = self.sessionmaker()\n\n with _session.begin():\n foo = self.Foo(counter=1)\n _session.add(foo)\n\n _session.begin()\n self.addCleanup(_session.rollback)\n foo = self.Foo(counter=sqla.func.imfake(123))\n _session.add(foo)\n matched = self.assertRaises(sqla.exc.OperationalError, _session.flush)\n self.assertIn(\"no such function\", str(matched))", "def db_connection(engine, tables):\n connection = engine.connect()\n # begin the nested transaction\n transaction = connection.begin()\n\n yield transaction\n\n # roll back the broader transaction\n transaction.rollback()\n # put back the connection to the connection pool\n connection.close()", "def is_transaction(self) -> bool:\n return False", "def commit(self):\n self.session.commit()", "def apply_and_commit(self) -> None:\n self.apply()\n self.commit()", "def db_transaction(func):\n @wraps(func)\n def _db_transaction(*args, **kwargs):\n with sqlite3.connect(DB_ABS_PATH) as connection:\n cursor = connection.cursor()\n ret = func(cursor, *args, **kwargs)\n return ret\n return _db_transaction" ]
[ "0.6690196", "0.6536858", "0.65303385", "0.65088254", "0.63948905", "0.6361204", "0.63229173", "0.6194685", "0.6192952", "0.61433756", "0.61367714", "0.61094934", "0.60980386", "0.6026481", "0.6020292", "0.6011731", "0.59969383", "0.5978559", "0.5967903", "0.5959607", "0.59506536", "0.59402466", "0.59363204", "0.59093654", "0.58996767", "0.58940995", "0.58772254", "0.5862373", "0.58048004", "0.5804404", "0.57893753", "0.57740086", "0.5771913", "0.5771033", "0.5761624", "0.5745583", "0.57442266", "0.5738512", "0.5726467", "0.5725049", "0.5724942", "0.57184035", "0.5707201", "0.5692355", "0.5691538", "0.56802714", "0.5677173", "0.566072", "0.5654419", "0.565047", "0.56468", "0.56405944", "0.5635513", "0.5623225", "0.56113416", "0.5611144", "0.5609999", "0.5596988", "0.55958", "0.5593891", "0.55890626", "0.55808014", "0.5574265", "0.55735385", "0.5566281", "0.5559748", "0.55502707", "0.553165", "0.553165", "0.553165", "0.553165", "0.553165", "0.5530682", "0.55216205", "0.55115193", "0.551019", "0.55101484", "0.5501046", "0.549913", "0.5482556", "0.5477731", "0.5465642", "0.54591453", "0.54511714", "0.5448804", "0.5446673", "0.543002", "0.54277813", "0.5427157", "0.5425816", "0.54257655", "0.54214793", "0.54179186", "0.54177624", "0.54091835", "0.54063267", "0.5405105", "0.54040015", "0.5393485", "0.5388049", "0.5387637" ]
0.0
-1
Impala doesn't support transactions; raises NotSupportedError
def rollback(self): # PEP 249
    raise impala.error.NotSupportedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def supports_transactions(self):\n return self._mysql_storage_engine != \"MyISAM\"", "def supports_transactions(self):\n return False", "def with_transaction(session, f):\n try:\n f(session)\n session.commit()\n except Exception as e:\n session.rollback()\n raise e", "def start_transaction(self):\n raise Unsupported()", "def custom_statement(statement):\n try:\n conn = _access()\n cur = conn.cursor()\n conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\n cur.execute(\"{}\".format(statement))\n conn.close()\n return True\n except IntegrityError, ProgrammingError:\n return False", "def test_execute_transaction(data_manager):\n class NotFred(Exception):\n pass\n\n try:\n with data_manager.dal():\n session = current_context.sqlalchemy\n metadata.create_all(session.bind)\n \n session.execute(text('INSERT INTO test (name) VALUES (:name)'), {'name': 'Fred'})\n record = session.execute(text('SELECT * FROM test')).first()\n assert record.name == 'Fred'\n\n # Now error out to remove \"Fred\"\n raise NotFred('Do not like Fred')\n except NotFred:\n pass\n\n with data_manager.dal():\n session = current_context.sqlalchemy\n record = session.execute(text('SELECT * FROM test')).first()\n # Fred should have been rolled back\n assert not record", "def test_rollback(self, local_connection):\n\n users = self.tables.users\n connection = local_connection\n transaction = connection.begin()\n connection.execute(users.insert(), dict(user_id=1, user_name=\"user1\"))\n connection.execute(users.insert(), dict(user_id=2, user_name=\"user2\"))\n connection.execute(users.insert(), dict(user_id=3, user_name=\"user3\"))\n transaction.rollback()\n result = connection.exec_driver_sql(\"select * from users\")\n assert len(result.fetchall()) == 0", "def transaction(self, context: InjectionContext = None) -> \"ProfileSession\":", "def transaction_failed(self):", "def commit(self, transaction):\n raise NotImplementedError", "def test_invalid_table(self):\n self.execute_query_expect_failure(self.client, \"select * from functional.bad_serde\")\n # The table expires after 1 second. Sleeping for another logbufsecs=5 seconds to wait\n # for the log to be flushed. 
Wait 4 more seconds to reduce flakiness.\n time.sleep(10)\n assert \"Unexpected exception thrown while attempting to automatically invalidate \"\\\n \"tables\" not in open(os.path.join(self.impala_log_dir, \"catalogd.INFO\")).read()", "async def test_transaction_rollback_low_level(database_url):\n\n async with Database(database_url) as database:\n async with database.transaction(force_rollback=True):\n transaction = await database.transaction()\n try:\n query = notes.insert().values(text=\"example1\", completed=True)\n await database.execute(query)\n raise RuntimeError()\n except:\n await transaction.rollback()\n else: # pragma: no cover\n await transaction.commit()\n\n query = notes.select()\n results = await database.fetch_all(query=query)\n assert len(results) == 0", "async def test_transaction_rollback(database_url):\n\n async with Database(database_url) as database:\n async with database.transaction(force_rollback=True):\n try:\n async with database.transaction():\n query = notes.insert().values(text=\"example1\", completed=True)\n await database.execute(query)\n raise RuntimeError()\n except RuntimeError:\n pass\n\n query = notes.select()\n results = await database.fetch_all(query=query)\n assert len(results) == 0", "def start_transaction_sql(self):\n return 'START TRANSACTION;'", "def BeginTransaction(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def start_transaction_sql(self):\n return \"BEGIN TRANSACTION\"", "async def test_transaction_commit_low_level(database_url):\n\n async with Database(database_url) as database:\n async with database.transaction(force_rollback=True):\n transaction = await database.transaction()\n try:\n query = notes.insert().values(text=\"example1\", completed=True)\n await database.execute(query)\n except: # pragma: no cover\n await transaction.rollback()\n else:\n await transaction.commit()\n\n query = notes.select()\n results = await database.fetch_all(query=query)\n assert len(results) == 1", "def dbtransaction(request):\n connection = engine.connect()\n transaction = connection.begin()\n\n def teardown():\n # Explicitly remove the session so that we'll get a new session every time we go here.\n transaction.rollback()\n connection.close()\n\n request.addfinalizer(teardown)\n return connection", "def cypher_transaction():\n session = cypher.Session(HOST)\n return session.create_transaction()", "def start_transaction(self):\n self._connection.execute_nonquery(\"sql\", \"START TRANSACTION\", True)", "def runSqlNoTransaction(self, sql):\r\n self.c.autocommit = True\r\n cursor = self.c.cursor()\r\n cursor.execute(sql)\r\n self.c.commit()\r\n cursor.close()\r\n self.c.autocommit = False\r\n return True", "def start_transaction(self) -> None:\n pass", "async def test_transaction_decorator(database_url):\n async with Database(database_url, force_rollback=True) as database:\n\n @database.transaction()\n async def insert_data(raise_exception):\n query = notes.insert().values(text=\"example\", completed=True)\n await database.execute(query)\n if raise_exception:\n raise RuntimeError()\n\n with pytest.raises(RuntimeError):\n await insert_data(raise_exception=True)\n\n query = notes.select()\n results = await database.fetch_all(query=query)\n assert len(results) == 0\n\n await insert_data(raise_exception=False)\n\n query = notes.select()\n results = await database.fetch_all(query=query)\n assert len(results) == 1", "def transaction() -> Generator:\n session = current_session()\n logger.debug('transaction with session %s', 
id(session))\n try:\n yield session\n # Only commit if there are un-flushed changes. The caller may commit\n # explicitly, e.g. to do exception handling.\n if session.dirty or session.deleted or session.new:\n session.commit()\n logger.debug('committed!')\n except ClassicBaseException as e:\n logger.debug('Command failed, rolling back: %s', str(e))\n session.rollback()\n raise # Propagate exceptions raised from this module.\n except InvalidEvent:\n session.rollback()\n raise\n except Exception as e:\n logger.debug('Command failed, rolling back: %s', str(e))\n session.rollback()\n raise TransactionFailed('Failed to execute transaction') from e", "def db_connection(engine, tables):\n connection = engine.connect()\n # begin the nested transaction\n transaction = connection.begin()\n\n yield transaction\n\n # roll back the broader transaction\n transaction.rollback()\n # put back the connection to the connection pool\n connection.close()", "def run_transaction(self, op, max_retries=3):\n\n if Database.conn is None:\n raise TypeError(\"Connection should not be None. Did you run connect_to_db()?\")\n\n # leaving this block the transaction will commit or rollback\n # (if leaving with an exception)\n with Database.conn:\n for retry in range(1, max_retries + 1):\n try:\n result = op(Database.conn)\n\n # If we reach this point, we were able to commit, so we break\n # from the retry loop.\n return result\n\n except SerializationFailure as e:\n # This is a retry error, so we roll back the current\n # transaction and sleep for a bit before retrying. The\n # sleep time increases for each failed transaction.\n logging.debug(\"got error: %s\", e)\n Database.conn.rollback()\n logging.debug(\"EXECUTE SERIALIZATION_FAILURE BRANCH\")\n sleep_ms = (2 ** retry) * 0.1 * (random.random() + 0.5)\n logging.debug(\"Sleeping %s seconds\", sleep_ms)\n time.sleep(sleep_ms)\n\n except psycopg2.Error as e:\n logging.debug(\"got error: %s\", e)\n logging.debug(\"EXECUTE NON-SERIALIZATION_FAILURE BRANCH\")\n raise e\n\n raise ValueError(f\"Transaction did not succeed after {max_retries} retries\")", "async def test_rollback_isolation(database_url):\n\n async with Database(database_url) as database:\n # Perform some INSERT operations on the database.\n async with database.transaction(force_rollback=True):\n query = notes.insert().values(text=\"example1\", completed=True)\n await database.execute(query)\n\n # Ensure INSERT operations have been rolled back.\n query = notes.select()\n results = await database.fetch_all(query=query)\n assert len(results) == 0", "def txn(db):\n with db.atomic() as txn:\n yield\n txn.rollback()", "def startTransaction(self) -> int:\n ...", "def test_auto_rollback(self):\n self.mocked_cursor.execute.side_effect = psycopg2.Error('testing')\n\n db = database.Database()\n try:\n db.execute(sql=\"SELECT * from FOO WHERE bar LIKE 'baz'\")\n except database.DatabaseError:\n pass\n\n self.assertEqual(self.mocked_connection.rollback.call_count, 1)", "def manual_transaction():\r\n try:\r\n yield\r\n except Exception:\r\n transaction.rollback()\r\n log.exception('Due to an error, this transaction has been rolled back')\r\n raise\r\n else:\r\n transaction.commit()", "def _do_commit(self):", "def transaction_failed_before_processing(self):", "async def transact(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:\n stats.inc('transactions', 'SQL')\n async with self.pool.acquire() as conn:\n async with conn.cursor() as cur:\n try:\n ret = await func(cur, *args, **kwargs)\n except:\n await 
conn.rollback()\n raise\n else:\n await conn.commit()\n return ret", "def _is_transaction_isolation_error(self, error):\n return False", "def transaction():\n try:\n conn = connect_to_db()\n yield conn\n conn.commit()\n except Exception as e:\n print(e)\n conn.rollback()\n finally:\n conn.close()", "def _is_transaction_isolation_error(self, error):\n from psycopg2.extensions import TransactionRollbackError\n\n # Django can wrap errors, adding it to the `__cause__` attribute\n for e in (error, getattr(error, '__cause__', None)):\n if isinstance(e, TransactionRollbackError):\n return True\n return False", "def transaction(fn):\n @wraps(fn)\n def transaction_inner(*args, **kwargs): #1\n start = time()\n stmp_id = id_gen()\n session = operation.session\n sessionid = id(session)\n \n # set distributed transaction id to 0 for single transaction\n try:\n operation.id\n except: \n operation.id = str(uuid4())\n \n try:\n # get runtime info\n cp = current_process()\n ct = current_thread() \n \n # format request params\n params = []\n for item in args:\n params.append(unicode(item))\n for k,v in kwargs.iteritems():\n params.append(u\"'%s':'%s'\" % (k, v))\n \n # call internal function\n res = fn(*args, **kwargs)\n \n session.commit()\n elapsed = round(time() - start, 4)\n logger.debug(u'%s.%s - %s - transaction - %s - %s - OK - %s' % (\n operation.id, stmp_id, sessionid, fn.__name__, \n params, elapsed))\n \n return res\n except ModelError as ex:\n elapsed = round(time() - start, 4)\n logger.error(u'%s.%s - %s - transaction - %s - %s - KO - %s' % (\n operation.id, stmp_id, sessionid, fn.__name__, \n params, elapsed))\n if ex.code not in [409]:\n #logger.error(ex.desc, exc_info=1)\n logger.error(ex.desc)\n \n session.rollback()\n raise TransactionError(ex.desc, code=ex.code)\n except IntegrityError as ex:\n elapsed = round(time() - start, 4)\n logger.error(u'%s.%s - %s - transaction - %s - %s - KO - %s' % (\n operation.id, stmp_id, sessionid, fn.__name__, \n params, elapsed))\n logger.error(ex.orig, exc_info=1)\n logger.error(ex.orig)\n\n session.rollback()\n raise TransactionError(ex.orig)\n except DBAPIError as ex:\n elapsed = round(time() - start, 4)\n logger.error(u'%s.%s - %s - transaction - %s - %s - KO - %s' % (\n operation.id, stmp_id, sessionid, fn.__name__, \n params, elapsed))\n #logger.error(ex.orig, exc_info=1)\n logger.error(ex.orig)\n \n session.rollback()\n raise TransactionError(ex.orig)\n \n except Exception as ex:\n elapsed = round(time() - start, 4)\n logger.error(u'%s.%s - %s - transaction - %s - %s - KO - %s' % (\n operation.id, stmp_id, sessionid, fn.__name__, \n params, elapsed))\n #logger.error(ex, exc_info=1)\n logger.error(ex)\n \n session.rollback()\n raise TransactionError(ex)\n\n return transaction_inner", "def Transaction(db):\n def wrapper(f):\n def transaction_wrapper(*args, **kwargs):\n tx = db.beginTx()\n \n try: args[0].transaction = tx\n except: pass\n \n result = f(*args, **kwargs)\n tx.success()\n tx.close()\n return result\n return transaction_wrapper\n return wrapper", "async def test_iterate_outside_transaction_with_temp_table(database_url):\n\n database_url = DatabaseURL(database_url)\n if database_url.dialect == \"sqlite\":\n pytest.skip(\"SQLite interface does not work with temporary tables.\")\n\n async with Database(database_url) as database:\n query = \"CREATE TEMPORARY TABLE no_transac(num INTEGER)\"\n await database.execute(query)\n\n query = \"INSERT INTO no_transac(num) VALUES (1), (2), (3), (4), (5)\"\n await database.execute(query)\n\n query = 
\"SELECT * FROM no_transac\"\n iterate_results = []\n\n async for result in database.iterate(query=query):\n iterate_results.append(result)\n\n assert len(iterate_results) == 5", "def upgrade():\n pass\n # op.execute(\"\"\"\n # INSERT INTO context_implications (\n # source_context_id, source_context_scope, context_id, context_scope\n # )\n # SELECT DISTINCT rs.context_id as source_context_id,\n # 'Audit' AS source_context_scope,\n # p.context_id,\n # 'Program' AS context_scope\n # FROM relationships r\n # INNER JOIN responses rs\n # ON rs.id = r.source_id\n # AND r.source_type IN ('Response', 'DocumentationResponse',\n # 'InterviewResponse')\n # INNER JOIN programs p\n # ON p.id = r.destination_id\n # AND r.destination_type = 'Program'\n # WHERE p.private = 1\n # AND (SELECT count(*) from context_implications\n # WHERE source_context_id = rs.context_id\n # AND context_id = p.context_id) < 1\n # \"\"\")\n\n # op.execute(\"\"\"\n # INSERT INTO context_implications (\n # source_context_id, source_context_scope, context_id, context_scope\n # )\n # SELECT DISTINCT sp.context_id as source_context_id,\n # 'Program' AS source_context_scope,\n # p.context_id,\n # 'Program' AS context_scope\n # FROM relationships r\n # INNER JOIN responses rs\n # ON rs.id = r.source_id\n # AND r.source_type IN ('Response', 'DocumentationResponse',\n # 'InterviewResponse')\n # INNER JOIN requests rqs\n # ON rqs.id = rs.request_id\n # INNER JOIN audits a\n # ON a.id = rqs.audit_id\n # INNER JOIN programs sp\n # ON sp.id = a.program_id\n # INNER JOIN programs p\n # ON p.id = r.destination_id\n # AND r.destination_type = 'Program'\n # WHERE p.private = 1\n # AND (SELECT count(*) from context_implications\n # WHERE source_context_id = sp.context_id\n # AND context_id = p.context_id) < 1\n # \"\"\")", "def test_psycopg_binary_query_works(instrument, postgres_connection, elasticapm_client):\n cursor = postgres_connection.cursor()\n query = b\"SELECT * FROM test WHERE name LIKE 't%'\"\n\n baked_query = query.decode()\n try:\n elasticapm_client.begin_transaction(\"web.django\")\n cursor.execute(query)\n result = cursor.fetchall()\n elasticapm_client.end_transaction(None, \"test-transaction\")\n finally:\n # make sure we've cleared out the spans for the other tests.\n assert [(2, \"two\"), (3, \"three\")] == result\n transactions = elasticapm_client.events[TRANSACTION]\n spans = elasticapm_client.spans_for_transaction(transactions[0])\n span = spans[0]\n assert span[\"name\"] == \"SELECT FROM test\"\n assert \"db\" in span[\"context\"]\n assert span[\"context\"][\"db\"][\"instance\"] == \"elasticapm_test\"\n assert span[\"context\"][\"db\"][\"type\"] == \"sql\"\n assert span[\"context\"][\"db\"][\"statement\"] == baked_query", "def upgrade():\n op.create_table(\n \"dag_run_note\",\n sa.Column(\"user_id\", sa.Integer(), nullable=True),\n sa.Column(\"dag_run_id\", sa.Integer(), nullable=False),\n sa.Column(\n \"content\", sa.String(length=1000).with_variant(sa.Text(length=1000), \"mysql\"), nullable=True\n ),\n sa.Column(\"created_at\", UtcDateTime(timezone=True), nullable=False),\n sa.Column(\"updated_at\", UtcDateTime(timezone=True), nullable=False),\n sa.ForeignKeyConstraint(\n (\"dag_run_id\",), [\"dag_run.id\"], name=\"dag_run_note_dr_fkey\", ondelete=\"CASCADE\"\n ),\n sa.ForeignKeyConstraint((\"user_id\",), [\"ab_user.id\"], name=\"dag_run_note_user_fkey\"),\n sa.PrimaryKeyConstraint(\"dag_run_id\", name=op.f(\"dag_run_note_pkey\")),\n )\n\n op.create_table(\n \"task_instance_note\",\n sa.Column(\"user_id\", 
sa.Integer(), nullable=True),\n sa.Column(\"task_id\", StringID(), nullable=False),\n sa.Column(\"dag_id\", StringID(), nullable=False),\n sa.Column(\"run_id\", StringID(), nullable=False),\n sa.Column(\"map_index\", sa.Integer(), nullable=False),\n sa.Column(\n \"content\", sa.String(length=1000).with_variant(sa.Text(length=1000), \"mysql\"), nullable=True\n ),\n sa.Column(\"created_at\", UtcDateTime(timezone=True), nullable=False),\n sa.Column(\"updated_at\", UtcDateTime(timezone=True), nullable=False),\n sa.PrimaryKeyConstraint(\n \"task_id\", \"dag_id\", \"run_id\", \"map_index\", name=op.f(\"task_instance_note_pkey\")\n ),\n sa.ForeignKeyConstraint(\n (\"dag_id\", \"task_id\", \"run_id\", \"map_index\"),\n [\n \"task_instance.dag_id\",\n \"task_instance.task_id\",\n \"task_instance.run_id\",\n \"task_instance.map_index\",\n ],\n name=\"task_instance_note_ti_fkey\",\n ondelete=\"CASCADE\",\n ),\n sa.ForeignKeyConstraint((\"user_id\",), [\"ab_user.id\"], name=\"task_instance_note_user_fkey\"),\n )", "def transaction_session(session=None, auto_commit=False):\n from decanter.database import db\n session = session or db.session\n try:\n yield session\n # Catch any and every exception and execute a rollback\n except Exception, e:\n session.rollback()\n # Now re-raise the exception to let it continue to bubble up\n raise e\n if auto_commit:\n session.commit()", "def transaction(install_module):\n from trytond.transaction import Transaction\n from trytond.tests.test_tryton import USER, CONTEXT, DB_NAME\n\n with Transaction().start(DB_NAME, USER, context=CONTEXT) as transaction:\n yield transaction\n\n transaction.cursor.rollback()", "def abort(self, transaction):\n raise NotImplementedError", "def is_transaction(self) -> bool:\n return False", "def start_transaction(self):\n self._db.autocommit = False", "def transaction(session: Union[scoped_session, Session, \"SessionlessContext\"]):\n # temporary hack; need to fix access to scoped_session callable, not proxy\n if isinstance(session, scoped_session):\n session = session()\n # hack: this could be model.store.SessionlessContext; then we don't need to do anything\n elif not isinstance(session, Session):\n yield\n return # exit: can't use as a Session\n\n if not session.in_transaction():\n with session.begin():\n yield\n else:\n yield", "async def test_transaction_commit(database_url):\n async with Database(database_url) as database:\n async with database.transaction(force_rollback=True):\n async with database.transaction():\n query = notes.insert().values(text=\"example1\", completed=True)\n await database.execute(query)\n\n query = notes.select()\n results = await database.fetch_all(query=query)\n assert len(results) == 1", "def begin_transaction(self):\r\n self.db.commit(False)", "def abort_transaction(self) -> None:\n pass", "def _check_transactional_ddl(self):\n table_name = \"yoyo_tmp_{}\".format(utils.get_random_string(10))\n table_name_quoted = self.quote_identifier(table_name)\n sql = self.create_test_table_sql.format(table_name_quoted=table_name_quoted)\n with self.transaction() as t:\n self.execute(sql)\n t.rollback()\n try:\n with self.transaction():\n self.execute(\"DROP TABLE {}\".format(table_name_quoted))\n except self.DatabaseError:\n return True\n return False", "def test_flush_wrapper_operational_error(self):\n\n _session = self.sessionmaker()\n\n with _session.begin():\n foo = self.Foo(counter=1)\n _session.add(foo)\n\n _session.begin()\n self.addCleanup(_session.rollback)\n foo = self.Foo(counter=sqla.func.imfake(123))\n 
_session.add(foo)\n matched = self.assertRaises(sqla.exc.OperationalError, _session.flush)\n self.assertIn(\"no such function\", str(matched))", "def commit_transaction(self) -> None:\n pass", "def __call__(self):\n session = self._session()\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()", "def transact(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n with transaction() as conn:\n fn(conn, *args, **kwargs)\n return wrapper", "def test_set_isolation_level(self):\n eng = create_engine(testing.db.url)\n eq_(\n eng.execute(\"show transaction isolation level\").scalar(),\n 'read committed')\n eng = create_engine(testing.db.url, isolation_level=\"SERIALIZABLE\")\n eq_(\n eng.execute(\"show transaction isolation level\").scalar(),\n 'serializable')\n eng = create_engine(testing.db.url, isolation_level=\"FOO\")\n\n if testing.db.driver == 'zxjdbc':\n exception_cls = eng.dialect.dbapi.Error\n else:\n exception_cls = eng.dialect.dbapi.ProgrammingError\n assert_raises(exception_cls, eng.execute, \"show transaction isolation level\")", "def test_unsupported_sql(self):\n user = getuser()\n impala_client = self.create_impala_client()\n error_msg = \"UnsupportedFeatureException: {0} is not supported by Sentry.\"\n statements = [(\"grant select on database functional to user foo\",\n error_msg.format(\"GRANT <privilege> TO USER\")),\n (\"grant select on database functional to group foo\",\n error_msg.format(\"GRANT <privilege> TO GROUP\")),\n (\"revoke select on database functional from user foo\",\n error_msg.format(\"REVOKE <privilege> FROM USER\")),\n (\"revoke select on database functional from group foo\",\n error_msg.format(\"REVOKE <privilege> FROM GROUP\")),\n (\"show grant group foo\", error_msg.format(\"SHOW GRANT GROUP\"))]\n for statement in statements:\n result = self.execute_query_expect_failure(impala_client, statement[0], user=user)\n assert statement[1] in str(result)", "def commit_query(conn):\r\n conn.commit()", "def test_unsupported_conn_type(self):\n op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"redis_default\",\n sql=\"SELECT count(1) FROM INFORMATION_SCHEMA.TABLES\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n with pytest.raises(AirflowException):\n op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def BeginTransaction(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def test_join_transaction_modes(\n self,\n connection_no_trans,\n join_transaction_mode,\n operation,\n external_state: testing.Variation,\n ):\n\n connection = connection_no_trans\n\n t1: Optional[Transaction]\n s1: Optional[NestedTransaction]\n\n if external_state.none:\n t1 = s1 = None\n elif external_state.transaction:\n t1 = connection.begin()\n s1 = None\n elif external_state.savepoint:\n t1 = connection.begin()\n s1 = connection.begin_nested()\n else:\n external_state.fail()\n\n if join_transaction_mode.none:\n sess = Session(connection)\n else:\n sess = Session(\n connection, join_transaction_mode=join_transaction_mode.name\n )\n\n sess.connection()\n\n if operation.close:\n sess.close()\n elif operation.commit:\n sess.commit()\n elif operation.rollback:\n sess.rollback()\n elif operation.nothing:\n pass\n else:\n operation.fail()\n\n if external_state.none:\n if operation.nothing:\n assert connection.in_transaction()\n else:\n assert not connection.in_transaction()\n\n elif 
external_state.transaction:\n assert t1 is not None\n\n if (\n join_transaction_mode.none\n or join_transaction_mode.conditional_savepoint\n or join_transaction_mode.rollback_only\n ):\n if operation.rollback:\n assert t1._deactivated_from_connection\n assert not t1.is_active\n else:\n assert not t1._deactivated_from_connection\n assert t1.is_active\n elif join_transaction_mode.create_savepoint:\n assert not t1._deactivated_from_connection\n assert t1.is_active\n elif join_transaction_mode.control_fully:\n if operation.nothing:\n assert not t1._deactivated_from_connection\n assert t1.is_active\n else:\n assert t1._deactivated_from_connection\n assert not t1.is_active\n else:\n join_transaction_mode.fail()\n\n if t1.is_active:\n t1.rollback()\n elif external_state.savepoint:\n assert s1 is not None\n assert t1 is not None\n\n assert not t1._deactivated_from_connection\n assert t1.is_active\n\n if join_transaction_mode.rollback_only:\n if operation.rollback:\n assert s1._deactivated_from_connection\n assert not s1.is_active\n else:\n assert not s1._deactivated_from_connection\n assert s1.is_active\n elif join_transaction_mode.control_fully:\n if operation.nothing:\n assert not s1._deactivated_from_connection\n assert s1.is_active\n else:\n assert s1._deactivated_from_connection\n assert not s1.is_active\n else:\n if operation.nothing:\n # session is still open in the sub-savepoint,\n # so we are not activated on connection\n assert s1._deactivated_from_connection\n\n # but we are still an active savepoint\n assert s1.is_active\n\n # close session, then we're good\n sess.close()\n\n assert not s1._deactivated_from_connection\n assert s1.is_active\n\n if s1.is_active:\n s1.rollback()\n if t1.is_active:\n t1.rollback()\n else:\n external_state.fail()", "def no_transact_batch(self):\n return NoTransactionBatch(self._client)", "def test_upsert_no_id_field():\n df = DataFrame({\"A\": [1, 2, 3], \"B\": list(\"abc\"), \"C\": [4.0, 5.0, nan]})\n with pytest.raises(ValueError):\n df_to_azure(df=df, tablename=\"wrong_method\", schema=\"test\", method=\"insert\")", "def create_tx(\n self,\n query: str,\n query_params: Optional[Mapping[str, Any]] = None,\n ):\n tx = self.get_session().begin_transaction()\n try:\n # logger.info(query)\n tx.run(query, parameters=query_params)\n tx.commit()\n except Exception as e:\n logger.error(e)\n finally:\n tx.close()", "def test_execute(data_manager):\n with data_manager.dal():\n session = current_context.sqlalchemy\n assert isinstance(session, orm.Session)\n\n metadata.create_all(session.bind)\n \n session.execute(text('INSERT INTO test (name) VALUES (:name)'), {'name': 'Fred'})\n record = session.execute(text('SELECT * FROM test')).first()\n assert record.name == 'Fred'\n\n # Make sure Fred still exists after transaction\n with data_manager.dal():\n session = current_context.sqlalchemy\n assert isinstance(session, orm.Session)\n\n record = session.execute(text('SELECT * FROM test')).first()\n assert record.name == 'Fred'", "def ReadTransaction(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def atomic_transaction(\n using: Union[str, Sequence[str]], savepoint: bool = True\n) -> Union[transaction.Atomic, ExitStack]:\n if isinstance(using, str):\n return transaction.atomic(using=using, savepoint=savepoint)\n\n stack = ExitStack()\n # dict.fromkeys -> deduplicate while preserving order\n for db in dict.fromkeys(using):\n stack.enter_context(transaction.atomic(using=db, savepoint=savepoint))\n return stack", "def 
transactionEnd(self):\n self.connection.commit()", "def Insert_in_Transaction(db, params):\r\n \r\n try: \r\n db.Transaction.insert_one(\r\n {\r\n \"Open time\": params[0], \r\n \"High\": params[1], \r\n \"Low\": params[2], \r\n \"Open\": params[3], \r\n \"Close\": params[4], \r\n \"Volume\": params[5], \r\n \"Quote asset volume\": params[6], \r\n \"Weighted average\": params[7]\r\n }\r\n )\r\n \r\n except Exception as e:\r\n print(e)", "def _upload_entity_df(\n conn: Connection, table_name: str, entity_df: Union[pd.DataFrame, str]\n) -> None:\n entity_df.reset_index(drop=True, inplace=True)\n\n pa_table = pa.Table.from_pandas(entity_df)\n hive_schema = []\n for field in pa_table.schema:\n hive_type = pa_to_hive_value_type(str(field.type))\n if not hive_type:\n raise ValueError(f'Not supported type \"{field.type}\" in entity_df.')\n hive_schema.append((field.name, hive_type))\n\n with conn.cursor() as cursor:\n \n # Create Hive temporary table according to entity_df schema\n create_entity_table_sql = f\"\"\"\n CREATE TABLE {table_name} (\n {', '.join([f'{col_name} {col_type}' for col_name, col_type in hive_schema])}\n )\n \"\"\"\n cursor.execute(create_entity_table_sql)\n\n def preprocess_value(raw_value, col_type):\n col_type = col_type.lower()\n\n if col_type == \"timestamp\" and isinstance(raw_value, datetime):\n raw_value = raw_value.strftime(\"%Y-%m-%d %H:%M:%S.%f\")\n return f'\"{raw_value}\"'\n\n if col_type in [\"string\", \"timestamp\", \"date\"]:\n return f'\"{raw_value}\"'\n else:\n return str(raw_value)\n\n # Upload entity_df to the Hive table by multiple rows insert method\n entity_count = len(pa_table)\n chunk_size = (\n entity_count\n if _ENTITY_UPLOADING_CHUNK_SIZE <= 0\n else _ENTITY_UPLOADING_CHUNK_SIZE\n )\n for batch in pa_table.to_batches(chunk_size):\n chunk_data = []\n for i in range(len(batch)):\n chunk_data.append(\n [\n preprocess_value(batch.columns[j][i].as_py(), hive_schema[j][1])\n for j in range(len(hive_schema))\n ]\n )\n\n entity_chunk_insert_sql = f\"\"\"\n INSERT INTO TABLE {table_name} ({', '.join([f'{col_name}' for col_name, col_type in hive_schema])})\n VALUES ({'), ('.join([', '.join(chunk_row) for chunk_row in chunk_data])})\n \"\"\"\n cursor.execute(entity_chunk_insert_sql)", "def func():\n try:\n session.add(point_table(id=5, ptest=(None, 1)))\n session.commit()\n session.close()\n except Exception as e:\n session.close()\n raise e", "def create_transaction(conn, transaction):\n sql = ''' INSERT INTO transactions(date, value, currency, desc, categ)\n VALUES(?, ?, ?, ?, ?) 
'''\n cur = conn.cursor()\n cur.execute(sql, transaction)", "def test_rollback():", "def test_flush_wrapper_plain_integrity_error(self):\n\n _session = self.sessionmaker()\n\n with _session.begin():\n foo = self.Foo(counter=1)\n _session.add(foo)\n\n _session.begin()\n self.addCleanup(_session.rollback)\n foo = self.Foo(counter=None)\n _session.add(foo)\n self.assertRaises(exception.DBError, _session.flush)", "async def test_commit_on_root_transaction(database_url):\n\n async with Database(database_url) as database:\n try:\n async with database.transaction():\n query = notes.insert().values(text=\"example1\", completed=True)\n await database.execute(query)\n\n query = notes.select()\n results = await database.fetch_all(query=query)\n assert len(results) == 1\n finally:\n query = notes.delete()\n await database.execute(query)", "def xa(db_url, metadata=None):\n if db_url.startswith('sqlite:'):\n engine = sqlalchemy.create_engine(db_url, isolation_level='SERIALIZABLE')\n else:\n engine = sqlalchemy.create_engine(db_url)\n if metadata is not None:\n metadata.create_all(engine)\n session_factory = sessionmaker(bind=engine)\n session = scoped_session(session_factory)\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()", "def CreateTransaction(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def testTransactions2(self):\n predicate = u\"metadata:predicate_Îñţér\"\n t1 = data_store.DB.Transaction(u\"metadata:row1Îñţér\", token=self.token)\n t2 = data_store.DB.Transaction(u\"metadata:row2Îñţér\", token=self.token)\n\n # This grabs read locks on these transactions\n t1.Resolve(predicate)\n t2.Resolve(predicate)\n\n # Now this should not raise since t1 and t2 are on different subjects\n t1.Set(predicate, \"1\")\n t1.Commit()\n t2.Set(predicate, \"2\")\n t2.Commit()", "def func():\n try:\n session.add(point_table(id=4, ptest=(1, None)))\n session.commit()\n session.close()\n except Exception as e:\n session.close()\n raise e", "def transform_aa_growth(engine):\n Base.metadata.create_all(engine)\n\n #Create the session\n session = sessionmaker()\n session.configure(bind=engine)\n s = session()\n\n for pid in s.query(AaMonth.publisher_id).distinct():\n try:\n # apple = 284417353\n app = get_most_popular_app(engine=engine,\n publisher_id=pid.publisher_id)\n app1 = get_app_by_month(engine=engine, app_id=app.app_id, offset=1)\n app3 = get_app_by_month(engine=engine, app_id=app.app_id, offset=3)\n\n mau = app.active_users\n mau1 = None\n mau3 = None\n mau_growth1 = None\n mau_growth3 = None\n if mau is not None:\n if app1 is not None and app1.active_users is not None:\n mau1 = app1.active_users;\n mau_growth1 = \\\n format((mau - mau1) / float(mau1) * 100, '.0f')\n if app3 is not None and app3.active_users is not None:\n mau3 = app3.active_users;\n mau_growth3 = \\\n format((mau - mau3) / float(mau3) * 100, '.0f')\n\n record = AaGrowth(**{\n 'name':app.company_name,\n 'mau':mau,\n 'mau1':mau1,\n 'mau3':mau3,\n 'period':app.period,\n 'mau_growth1':mau_growth1,\n 'mau_growth3':mau_growth3\n })\n\n s.add(record) #Add all the records\n except:\n print pid.publisher_id+ ': ' + traceback.format_exc()\n print 'committing'\n try:\n s.commit()\n except:\n s.rollback()\n print traceback.format_exc()\n finally:\n s.close() #Close the connection", "def transaction(self, transaction):\n # Allow for a list of blocks..\n transaction = utils.request_type(transaction)\n\n res = r.get(self.url + 
self.tx_info + str(transaction))\n return self.execute(res)", "def CreateTransaction(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def _do_rollback(self):\n self.backend.rollback()", "def get_transactions_db():\n if not hasattr(g, 'transactions_db'):\n g.pg_db = connect_transactions_db()\n return g.pg_db", "def tpc_begin(self, transaction):\n raise NotImplementedError", "def test_s3_table_functions_timeouts(started_cluster):\n with PartitionManager() as pm:\n pm.add_network_delay(node, 1200)\n\n with pytest.raises(QueryRuntimeException):\n node.query(\n \"\"\"\n INSERT INTO FUNCTION s3\n (\n nc_s3,\n filename = 'test_file.tsv.gz',\n format = 'TSV',\n structure = 'number UInt64',\n compression_method = 'gz'\n )\n SELECT * FROM numbers(1000000)\n \"\"\",\n settings=settings,\n )", "def test_psycopg_composable_query_works(instrument, postgres_connection, elasticapm_client):\n from psycopg import sql\n\n cursor = postgres_connection.cursor()\n query = sql.SQL(\"SELECT * FROM {table} WHERE {row} LIKE 't%' ORDER BY {row} DESC\").format(\n table=sql.Identifier(\"test\"), row=sql.Identifier(\"name\")\n )\n baked_query = query.as_string(cursor.__wrapped__)\n result = None\n try:\n elasticapm_client.begin_transaction(\"web.django\")\n cursor.execute(query)\n result = cursor.fetchall()\n elasticapm_client.end_transaction(None, \"test-transaction\")\n finally:\n # make sure we've cleared out the spans for the other tests.\n assert [(2, \"two\"), (3, \"three\")] == result\n transactions = elasticapm_client.events[TRANSACTION]\n spans = elasticapm_client.spans_for_transaction(transactions[0])\n span = spans[0]\n assert span[\"name\"] == \"SELECT FROM test\"\n assert \"db\" in span[\"context\"]\n assert span[\"context\"][\"db\"][\"instance\"] == \"elasticapm_test\"\n assert span[\"context\"][\"db\"][\"type\"] == \"sql\"\n assert span[\"context\"][\"db\"][\"statement\"] == baked_query", "def BeginTransaction(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def Rollback(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def test_bad_insert(db):\n with pytest.raises(error.InvalidSerialization):\n _ = db.insert_current('config', db, store_permanently=False)\n\n with pytest.raises(error.InvalidSerialization):\n _ = db.insert('config', db)", "def prepare_for_commit(self):", "def test_commit(self):\n # TODO: Test errors while committing and recovery\n pass", "def transactionally(transactionCreator):\n def thunk(operation):\n return inTransaction(transactionCreator, operation)\n return thunk", "def transactionally(transactionCreator):\n def thunk(operation):\n return inTransaction(transactionCreator, operation)\n return thunk", "def test_begin_transaction(self):\n # the utility we're testing here avoids setting the isolation level when this setting is True\n # because tests usually run within their own transaction. 
By the time the isolation level\n # is attempted to be set within a test, there have been reads and writes and the isolation\n # cannot be changed\n self.assertFalse(connection.in_atomic_block)\n with _begin_transaction(None, isolated=True):\n session = SyncSession.objects.create(\n id=uuid.uuid4().hex,\n profile=\"facilitydata\",\n last_activity_timestamp=timezone.now(),\n )\n transfer_session = TransferSession.objects.create(\n id=uuid.uuid4().hex,\n sync_session=session,\n push=True,\n last_activity_timestamp=timezone.now(),\n )\n create_buffer_and_store_dummy_data(transfer_session.id)\n\n # manual cleanup\n self.assertNotEqual(0, Store.objects.all().count())\n # will cascade delete\n SyncSession.objects.all().delete()\n Store.objects.all().delete()", "def session_scope(raise_exception=True):\n session = Session()\n try:\n yield session\n session.commit()\n except Exception:\n session.rollback()\n if raise_exception:\n raise\n finally:\n session.close()", "def maybe_commit(job):", "def transaction(self, retries=0):\n def decorator(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n attempts = 0\n with self.get_pool().connection() as conn:\n proxy = ConnectionProxy(conn)\n while True:\n try:\n retval = func(proxy, *args, **kwargs)\n conn.commit()\n except IntegrityError:\n attempts += 1\n if attempts > retries:\n raise\n conn.rollback()\n except Exception:\n conn.rollback()\n raise\n else:\n return retval\n return wrapper\n return decorator" ]
[ "0.66796213", "0.63022", "0.5864729", "0.5848233", "0.58128023", "0.57294697", "0.5692588", "0.56807727", "0.5673939", "0.56446296", "0.5611186", "0.55624133", "0.55621827", "0.5557179", "0.5543098", "0.55403894", "0.54963696", "0.54826725", "0.5479581", "0.54793453", "0.5419566", "0.54181707", "0.54050565", "0.5389542", "0.5354315", "0.53523844", "0.53499866", "0.53475416", "0.5340447", "0.53224605", "0.53208584", "0.5315379", "0.53108466", "0.53028405", "0.52971166", "0.5282442", "0.5275233", "0.52348787", "0.5232794", "0.5231928", "0.5224944", "0.52241343", "0.5222451", "0.52197194", "0.5211752", "0.52111775", "0.5204801", "0.5199041", "0.5197237", "0.5193996", "0.51923233", "0.51756155", "0.51754326", "0.5173069", "0.5150968", "0.5102615", "0.5087703", "0.508593", "0.50574404", "0.50568366", "0.505028", "0.5049199", "0.5046107", "0.5043719", "0.50433743", "0.5034041", "0.50338745", "0.5026858", "0.50160867", "0.50152975", "0.5014123", "0.5011492", "0.49986658", "0.499745", "0.49927643", "0.49867654", "0.49852213", "0.4985076", "0.4983436", "0.4982485", "0.49754608", "0.49705255", "0.49611905", "0.49569577", "0.4955904", "0.49403113", "0.4938866", "0.49311233", "0.4930768", "0.49299994", "0.4919833", "0.49190766", "0.49188516", "0.49172756", "0.4916819", "0.4916819", "0.49099866", "0.49033502", "0.48918337", "0.4889799" ]
0.59182566
2
Checks connection to server by requesting some info from the server.
def ping(self):
    return rpc.ping(self.service, self.session_handle)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_connection(self):\n for _ in range(3):\n try:\n r = get(f\"http://{self.ip}/student/{self.user}\")\n if r.ok:\n break \n except OSError as e:\n print(f\"Connection error:\\n{e}\")\n sleep(2)\n else:\n raise ConnectionError(f\"Can not connect to server with params ip: {self.ip}, user: {self.user}\")", "def check_connection(self):\n pass", "def checkServerThread(self):\r\n\r\n # check if the server is alive\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\r\n result = 1\r\n try:\r\n result = sock.connect_ex((\"dealookup.com\", 80))\r\n except:\r\n result = 1 \r\n\r\n # server is not live \r\n if result != 0:\r\n result = 1\r\n\r\n self.checkResultSignal.emit(result)", "def check_server():\n\n url='{url}/auth'.format(url=config.SERVER_URL)\n while True:\n\n try:\n res=request.urlopen(url,timeout=5).read()\n res=str(res,encoding='utf8')\n if 'connection valid' in res:\n break\n else:\n error_str='error: client-> check_server :' \\\n 'no auth to connect to server,exit process'\n info_manager(error_str,type='KEY')\n os._exit(0)\n except Exception as e:\n err_str='error:client->check_server:cannot ' \\\n 'connect to server; process sleeping'\n info_manager(err_str,type='NORMAL')\n time.sleep(5) # sleep for 1 seconds", "def status_check(self):\n try:\n client = self.connect()\n client.sys.is_initialized() # make an actual network connection\n return True\n except:\n return False", "def check_conn():\n try:\n urllib2.urlopen(\"http://www.google.com\", timeout=5)\n return True\n except urllib2.URLError:\n pass\n return False", "def __check(self):\n status = '200 OK'\n try:\n response = get(self.__url)\n status = '{} {}'.format(\n response.status_code,\n http.client.responses[response.status_code]\n )\n except Exception as e:\n status = e.__class__.__name__\n \n if status[:3] == '200':\n self.__notify_up()\n else:\n if not self.downtime_info:\n self.downtime_info = DowntimeInfo(status)\n self.__notify_down()", "def check_connection(self):\n return False", "def check(self):\n try:\n response = requests.head(self.url)\n except requests.exceptions.RequestException:\n return False, \"darkRed\", \"🛑 Connection Error\"\n return self._status_is_okay(response.status_code)", "def check_connection(ctx):\n pprint(ctx.obj.root.get().data)", "def check_connection(url=\"http://example.com/\"):\n try:\n requests.head(url)\n return True\n except requests.ConnectionError:\n spinner.warn(\"No internet connecction 🤭\")\n sys.exit(1)", "def test_check_connection(self):\n self.assertIsNotNone(app.check_connection())", "async def receive_connection_info(self) -> bool:\n # Polls the socket using accept. 
When data is found, returns ready indicator and encrypted data.\n ready_to_connect = False\n try:\n connect_info = await self.response_manager.get_connection_info(self.kernel_id)\n self._setup_connection_info(connect_info)\n ready_to_connect = True\n except Exception as e:\n if type(e) is timeout or type(e) is TimeoutError or type(e) is asyncio.exceptions.TimeoutError:\n self.log.debug(f\"Waiting for KernelID '{self.kernel_id}' to send connection \"\n f\"info from host '{self.assigned_host}' - retrying...\")\n else:\n error_message = f\"Exception occurred waiting for connection file response for \" \\\n f\"KernelId '{self.kernel_id}' on host '{self.assigned_host}': {e}\"\n await self.kill()\n self.log_and_raise(RuntimeError(error_message), chained=e)\n\n return ready_to_connect", "def check_connection(self):\n try:\n self.mfp.add(2,2)\n logger.info(\"Connection to user API established\")\n except xmlrpclib.ProtocolError:\n logger.error(\"Not possible to connect to MOF+. Check your credentials\")\n exit()\n return", "def check_connection():\n r = requests.get('https://www.google.com')\n if r.status_code == 200:\n print (colored(\"Connected.\", 'green'))\n else:\n print (colored(\"Not Connected.\", 'red'))", "def check_connection(self):\n self._log_msg_start(\"Check connection to module\")\n return self.get_module_info()", "async def check_connection_status(self):\n while True:\n if not self.connected:\n self.log.error(\"Lost connection to spa, attempting reconnect.\")\n await self.connect()\n await asyncio.sleep(10)\n continue\n if (self.lastupd + 5 * self.sleep_time) < time.time():\n self.log.error(\"Spa stopped responding, requesting panel config.\")\n await self.send_panel_req(0, 1)\n await asyncio.sleep(self.sleep_time)", "def __CheckConnectStatus(self):\r\n if not self.tn:\r\n print \"Connection is down!\"\r\n return False\r\n else:\r\n print \"Connection is alive!\"\r\n return True", "def test_connection(server_address, server_username=None, server_password=None, verbose=True):\n\n try:\n if server_username is None and server_password is None:\n r = requests.get(url=server_address)\n else:\n r = requests.get(url=server_address, auth=(server_username, server_password))\n if r.ok:\n if verbose:\n print(\"Network connectivity: VERIFIED. 
Server \" + server_address + \" is reachable!\")\n return True\n else:\n print(\"Something wrong during connection!\")\n return False\n\n except Exception as e:\n print(e)\n return False", "def check(self, connection):\n return True", "def check_socket(self):\n return self.__send_command(cmd=\"PING\")", "def _check_connection() -> bool:\n return bool(subprocess.check_output([\"hostname\", \"-I\"]))", "def _IsReady(self):\n if self.ip_address is None:\n self._GetIpAddress()\n if self.ip_address is not None:\n url = 'http://%s' % (self.ip_address)\n r = requests.get(url)\n if r.status_code == 200:\n return True\n return False", "def check_server(host, port, path_info='/', timeout=3, retries=30):\n if retries < 0:\n return 0\n time.sleep(.3)\n for i in range(retries):\n try:\n conn = http_client.HTTPConnection(host, int(port), timeout=timeout)\n conn.request('GET', path_info)\n res = conn.getresponse()\n return res.status\n except (socket.error, http_client.HTTPException):\n time.sleep(.3)\n return 0", "def test_server_is_up_and_running(self):\n r = requests.get(self.get_server_url())\n self.assertEqual(r.status_code, 200)", "def test_connection(self):\n req = requests.get(\"http://{}:{}\".format(self.config.options.get(\"Server\", \"ListenAddress\"),\n self.config.options.get(\"Server\", \"Port\")))\n\n self.assertEqual(req.status_code, 200)", "def isConnected():", "def validate_connection(self):\n for hostInfo in self.client.transport.hosts:\n host = hostInfo.get('host')\n port = hostInfo.get('port')\n self.validate_server_connection(host, port)", "def isonline():\n\n conn = httplib.HTTPConnection(\"www.google.com\", timeout=5)\n try:\n conn.request(\"HEAD\", \"/\")\n conn.close()\n return True\n except:\n conn.close()\n return False", "def check_status(self):\n log = logging.getLogger(\"%s.%s\" % (self.log_name,\n inspect.stack()[0][3]))\n log.setLevel(self.log_level)\n\n if self.url:\n return True\n try:\n result = requests.get(self.ext_url)\n self.url = self.ext_url\n return True\n except requests.exceptions.ConnectionError:\n pass\n try:\n result = requests.get(self.local_url)\n log.warning(\"Url '%s' not connecting. 
Using local_url '%s'\" % \\\n (self.ext_url, self.local_url))\n self.url = self.local_url\n return True\n except requests.exceptions.ConnectionError:\n self.url = None\n log.warning(\"Unable to connect using urls: %s\" % set([self.ext_url,\n self.local_url]))\n return False", "def check_client(self):\n self.current_state = \"No Network\"\n if self.esp_mgr.ap:\n if self.client_socket:\n # client exists\n self.current_state = \"Connected\"\n if self.client_socket.connected():\n if self.client_socket.available():\n data = self.client_socket.recv()\n if data:\n self._add_to_buffer(data)\n else:\n self._close_client()\n if time.monotonic() > self.test_connection:\n data = bytes([0])\n self.send_to_client(data)\n else:\n self._close_client()\n \n else:\n # check for new client\n self.current_state = \"Listening port 23\"\n # reset termious hack\n self.termious = None\n client_sock_num = self.esp_mgr.esp.socket_available(self.server_socket.socknum)\n if client_sock_num != adafruit_esp32spi_socket.NO_SOCKET_AVAIL:\n # new connection\n self.current_state = \"Connected\"\n self.test_connection = time.monotonic() + 5\n self.client_socket = adafruit_esp32spi_socket.socket(socknum=client_sock_num)\n \n self.send_telnet_command([telnet_IAC, telnet_cmd_codes['WONT'], telnet_opt_codes['Echo']])\n self.send_telnet_command([telnet_IAC, telnet_cmd_codes['WONT'], telnet_opt_codes['Suppress GA']])\n return self.current_state", "def test_connection(self, **kwargs):\n try:\n url = \"{0}\".format(self.base_url)\n response = requests.request(\"GET\", url)\n if response.status_code < 500:\n return True\n else:\n return False\n except KeyError:\n return False", "def check_status(self):\n try:\n self.server.ping()\n return True\n except Exception as e:\n return False", "def is_connected():\n \n try:\n socket.create_connection((\"www.google.com\", 80))\n return True\n except OSError:\n pass\n return False", "def is_connected():\n import socket\n try:\n host = socket.gethostbyname(\"www.gov.uk\")\n socket.create_connection((host, 80), 2)\n return True\n except:\n pass\n return False", "def _CheckConnect(self):\n try:\n resp = requests.get(self._target_url, timeout=2)\n if resp.headers['Maximum-Bytes']:\n self._max_bytes = int(resp.headers['Maximum-Bytes'])\n return resp.status_code == 200\n except requests.exceptions.ConnectionError:\n return False\n except Exception as e:\n self.exception('Unexpected test connect failure: %s', str(e))\n return False", "def verify_connection(self):\n return self.device.verify_connection()", "def net_check():\n resp = None\n host = \"https://gitlab.manjaro.org\"\n # noinspection PyBroadException\n try:\n resp = urllib.request.urlopen(host, timeout=2)\n except Exception:\n pass\n return bool(resp)", "def verify_core_connection():\n if not base_url or not api_credentials:\n retrieve_connection_info()\n return", "def verify_core_connection():\n if not base_url or not api_credentials:\n retrieve_connection_info()\n return", "def get_conn(url):\n try:\n request = requests.get(url)\n if request.status_code == 200:\n res = True\n else:\n res = False\n except:\n res = False\n return res", "def ready(self):\n\n if not self.running:\n return False\n\n try:\n response = requests.get(\n 'http://{}:{}'.format(\n self.running_host,\n self.running_port\n )\n )\n except requests.ConnectionError:\n return False\n\n if response.status_code == 404:\n return True\n elif response.status_code == 500:\n return False\n else:\n return False", "def ping():\n api_online = 
bool(check_url(\"https://rest.ensembl.org/info/ping?\"))\n vertebrate_url_online = bool(check_url(\"http://ftp.ensembl.org\"))\n other_url_online = bool(check_url(\"http://ftp.ensemblgenomes.org\"))\n return api_online and vertebrate_url_online and other_url_online", "def check_server_up(self):\n print \"Connecting to Mongo at %s:%s\" % (self.hostname, self.port)\n try:\n # TODO: update this to use new pymongo Client\n self.api = pymongo.Connection(self.hostname, self.port)\n return True\n except (AutoReconnect, ConnectionFailure), e:\n print e\n return False", "def check_availability(self):\n\t\tif not self.connection_is_usable:\n\t\t\treturn False\n\t\twith self.client_lock:\n\t\t\tif self.stream is None:\n\t\t\t\treturn False\n\t\t\tif self.last_ping is None or self.last_ping.age() >= self.ping_max_age:\n\t\t\t\tself.last_ping = SendPing(self, self.ping_timeout)\n\t\t\tlast_ping = self.last_ping\n\t\treturn last_ping.answered(self.ping_timeout)", "def check_connection(self):\n connections = [self.google.check_connection(), self.dbx.check_connection(), self.box.check_connection()]\n\n if connections.count(True) == 3:\n logging.warning(' All connections OK. System can be used for reads and writes.')\n return []\n elif connections.count(True) == 2:\n logging.critical(\"\\nOnly two connections available. System only usable for reads\")\n down = [i for i in enumerate(connections) if i == False ]\n if 0 in down:\n pass\n #logging.critical(\"Cannot connect to Google.\")\n if 1 in down:\n pass\n #logging.critical(\"Cannot connect to Dropbox\")\n if 2 in down:\n pass\n ##logging.critical(\"Cannot connect to Box\")\n return down\n elif connections.count(True) < 2:\n logging.critical(\"Sufficient connections could not be made. System unsuitable for reads or writes.\")\n down = [i for i in enumerate(connections) if i[1] == False]\n for entry in down:\n if 0 == entry[0]:\n down[0] += ('Google',)\n #logging.critical(\"Cannot connect to Google.\")\n if 1 == entry[0]:\n down[1] += ('Dropbox',)\n #logging.critical(\"Cannot connect to Dropbox\")\n if 2 == entry[0]:\n down[2] += ('Box',)\n #logging.critical(\"Cannot connect to Box\")\n return down", "def CheckIfConnecting(self):\n if self.CheckIfWiredConnecting() or self.CheckIfWirelessConnecting():\n return True\n else:\n return False", "async def check_config(self) -> None:\n try:\n await self._check_api()\n except aiohttp.ClientError as e:\n raise ConnectionError(str(e))", "def check_connection():\n status_code = urllib.request.urlopen(local_settings.DAFT_URL).getcode()\n\n if status_code == 200:\n on_or_404 = 'OK'\n else:\n on_or_404 = 'NOT OK'\n \n return on_or_404", "def verify_connection(self, datasource):\n url = urljoin(self.base_url, \"dataservers\")\n if not self.session.verify:\n import urllib3\n\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n res = self.session.get(url)\n if res.status_code != 200:\n raise ConnectionError\n j = res.json()\n for item in j[\"Items\"]:\n if item[\"Name\"] == datasource:\n return True\n return False", "def _listen_to_requests(self):\n while True:\n try:\n request = self._client.recv(1024)\n except socket.error as err:\n if DEBUG_LEVEL >= 1:\n print \"Got socket error: {}\".format(err.message)\n self._client.close()\n return True\n\n if not request:\n if DEBUG_LEVEL >= 0:\n print \"Closing connection\"\n self._client.close()\n return True\n\n if DEBUG_LEVEL >= 2:\n print request\n\n if not HTTPValidation.validate_request(request):\n if DEBUG_LEVEL >= 0:\n print \"Invalid request, 
closing...\"\n self._client.send(public_response_functions.get_error_response())\n self._client.close()\n return True\n\n if not self._send_response(request):\n if DEBUG_LEVEL >= 0:\n print \"Closing connection...\"\n self._client.close()\n return", "def central_server_alive(cls, timeout=1):\n central_server_address, _ = cls.get_central_address()\n\n try:\n requests.get(central_server_address, timeout=timeout, verify=False)\n except (Timeout, ConnectionError):\n return False\n\n return True", "def check_internet_connection():\n logging.debug('Checking internet connection')\n try:\n urlopen(config.api_base_url,\n timeout=config.timeout_internet_connection)\n logging.debug('Connected to the internet')\n return True\n except URLError as err:\n logging.debug('No internet connection')\n return False", "def check_for_incoming_info(self):\n\n if self.test_message_response:\n self.parse_incoming_message(self.test_message_response)\n return True\n\n POLL_ONLY_TIMEOUT_VALUE = 0\n got_at_least_one = False\n while (True):\n readables, writables, errors = select.select([self.socket_datastream], [], [], POLL_ONLY_TIMEOUT_VALUE)\n if not self.socket_datastream in readables:\n return got_at_least_one\n got_at_least_one = True\n data, remote_ip_port = self.socket_datastream.recvfrom(MAX_EXPECTED_MSG_SIZE)\n if remote_ip_port != self.ip_port_arduino_datastream:\n errorhandler.loginfo(\"Msg from unexpected source {}\".format(remote_ip_port))\n else:\n errorhandler.logdebug(\"msg received:{}\".format(data.hex()))\n self.parse_incoming_message(data)", "def check(self):\n self.lastcheck = time.time()\n delta = time.time() - self.last\n if delta > 270:\n self.server.restart = True\n self.server.connected = False\n elif delta > 180:\n self.server.printer.raw_message(\"PING :♥\")", "def is_alive():\n\n ## ---------------------------------------------------------------\n \n cmd = dict()\n cmd[\"type_\"] = \"is_alive\"\n cmd[\"name_\"] = \"\"\n\n s = socket.socket(\n socket.AF_INET,\n socket.SOCK_STREAM\n )\n try:\n s.connect((getml.host, getml.port))\n except ConnectionRefusedError:\n return False\n\n comm.send_string(s, json.dumps(cmd))\n\n s.close()\n\n return True", "def test_connection(self):\n r = main.List.connection()\n self.assertTrue(r.ping(), \"Connection failed.\")", "def checkAlive(self, timeout = 1500 , port = 3389):\n time_retry = 90\n # ugly c-style loop \n while 1:\n try:\n ip = self.getIp()\n if not ip:\n logging.warning(\"!Failed to obtain ip address\")\n else:\n logging.info(\"Probing \" + str(ip) + \":\" + str(port) + \" for connectivity\")\n sock = socket.create_connection((ip,port) , timeout)\n sock.close()\n logging.info(\"Server \" + str(ip) + \":\" + str(port) + \" successfully responded\")\n return True\n except Exception as e:\n logging.error(\"!: Failed to probe the remote server for a connection!\")\n logging.error(\"!:\" + str(e))\n logging.error(traceback.format_exc())\n timeout = timeout - time_retry\n if timeout > 0:\n logging.info(\"--- Waiting more \" + str(timeout) + \" for it to respond\");\n time.sleep(time_retry)\n else:\n break\n\n return False", "def ServerIsReady( self ):\n return self.ServerIsHealthy()", "def test(self):\r\n self.log.debug(\"connection test using version query with adapter %s...\", self.adapter)\r\n try:\r\n res = self.get('SELECT * FROM Package WHERE PackageID=\\'179\\'') # 'SELECT Family FROM Version'\r\n if not res:\r\n self.log.critical('no results, database problem.')\r\n return False\r\n else:\r\n self.log.info('connection successful.')\r\n return 
True\r\n except:\r\n self.log.critical('connection not possible, check host/user/pwd configuration')\r\n return False", "def ready(self):\n\n if not self.running:\n return False\n\n try:\n response = requests.get(\n 'http://{}:{}/v1/kv/health'.format(\n self.running_host,\n self.running_port\n )\n )\n except requests.ConnectionError:\n return False\n\n if response.status_code == 404:\n return True\n elif response.status_code == 500:\n return False\n else:\n return False", "def check(self):\n\n print('Requester object is active: \\t', str(self.is_active))\n print('Number of requests sent: \\t', str(self.n_requests))\n print('Requester opened: \\t\\t', str(self.st_time))\n print('Requester closed: \\t\\t', str(self.en_time))", "def check_server(self, timeout=1):\n ans = None\n end_time = time.time() + timeout\n while time.time() <= end_time:\n try:\n ans = self.xmlproxy.ping()\n except socket_error:\n time.sleep(1)\n except Exception as err:\n self.class_logger.warning(\"Unexpected type of error while checking xmlrpc server - {0} - {1}\".format(type(err), err))\n time.sleep(1)\n else:\n if ans == \"XMLReportingServer\":\n return True\n else:\n message = \"Unknown xmlrpc server is running on localhost:18080\"\n self.class_logger.error(message)\n raise Exception(message)\n return False", "def check_connection_status(status):\n\n if status.status_code == 200:\n return True\n else:\n return False", "def check_connectivity(self):\n return self.connected", "def verifyConn(self):\n if self.cissd is None:\n return False\n\n if self.lastVerify > time.time() - HEARTBEAT_INTERVAL:\n return True\n\n LOG.debug('verifying CISSD connection is alive')\n try:\n self.cissd.sendall('version\\n')\n except socket.error, msg:\n self.cissd = None\n return False\n\n bufsize = VERIFY_CONN_BUF_SIZE\n while ALIVE:\n try:\n buf = self.cissd.recv(bufsize)\n except socket.error, msg:\n self.cissd = None\n return False\n\n if not buf:\n self.cissd = None\n return False\n\n # We read everything CISSD sent us looping once more\n if len(buf) == bufsize:\n continue\n\n # If everything is good, send out our selfstats data.\n if self.selfStats:\n strs = [\n ('reader.lines_collected',\n '', self.reader.linesCollected),\n ('reader.lines_dropped',\n '', self.reader.linesDropped)\n ]\n\n for col in allLivingCollectors():\n strs.append(('collector.lines_sent', 'collector='\n + col.name, col.linesSent))\n strs.append(('collector.lines_received', 'collector='\n + col.name, col.linesRecv))\n strs.append(('collector.lines_invalid', 'collector='\n +col.name, col.linesInvalid))\n\n ts = int(time.time())\n strout = [\"cmanager.%s %d %d %s\"\n % (x[0], ts, x[2], x[1]) for x in strs]\n for line in strout:\n self.sendq.append(line)\n break\n\n self.lastVerify = time.time()\n return True", "def __check_ping(self):\n if not self.communications.ping():\n self.communications.ping(True)", "def can_connect(test_url):\n try:\n requests.get(test_url)\n except (OSError):#connection error\n logger.warning('couldn\\'t reach server on: {test_url}')\n return False\n return True", "def reachable(self):\n service = build('gmail', 'v1', http=Http(timeout=1.0))\n url = urlparse.urlparse(service._baseUrl)\n host = url.hostname\n port = url.port\n try:\n socket.getaddrinfo(host, port, proto=socket.IPPROTO_TCP)\n except (socket.herror, socket.gaierror, URLError, OSError):\n return False\n return True", "def _test_con(self) -> bool:\n self.reset_buffers()\n try:\n serial_number = self.get_exact(b\"<GETSERIAL>>\", size=7)\n except Exception as e:\n # Unsure of 
exception types.\n logger.warning(f\"{e}\", exc_info=True)\n return False\n # timeout error if wrong\n if len(serial_number) == 7:\n # Not 100% sure... no prescribed method of confirming\n # we're connected to a GMC device in specs\n logger.debug(f\"Test connection serial: {serial_number}\")\n return True\n else:\n logger.warning(f\"Unexpected response: {serial_number}\")\n return False", "def handle_request(self):\n\t\ttry:\n\t\t\trequest, client_address = self.get_request()\n\t\texcept socket.error:\n\t\t\treturn\n\t\tif self.verify_request(request, client_address):\n\t\t\ttry:\n\t\t\t\tself.process_request(request, client_address)\n\t\t\texcept:\n\t\t\t\tself.handle_error(request, client_address)\n\t\t\t\tself.close_request(request)", "def check_connection(self, logger, config) -> Tuple[bool, any]:\n logger.info(\"Checking Appfollow API connection...\")\n try:\n ext_id = config[\"ext_id\"]\n cid = config[\"cid\"]\n api_secret = config[\"api_secret\"]\n response = requests.get(\n f\"https://api.appfollow.io/ratings?ext_id={ext_id}&cid={cid}\", auth=HTTPBasicAuth(api_secret, api_secret)\n )\n if response.status_code == 200:\n return True, None\n else:\n return False, \"Invalid Appfollow API credentials\"\n except Exception as e:\n return False, e", "def connection_check(self, node_id, connection_sleep=0.1):\n retries = 0\n if not self.client.ready(node_id):\n while retries < self.MAX_RETRY:\n if self.client.ready(node_id):\n return True\n time.sleep(connection_sleep)\n retries += 1\n return False\n return True", "def check_cgi_connection(url):\n try:\n return urlopen(url, timeout=15).getcode() == 411\n except HTTPError as e:\n if e.code == 411:\n return True\n warn_user(f\"Connection to {url} failed with error {e}. Retrying with different url and port.\")\n return False\n except (OSError, URLError) as e:\n warn_user(f\"Connection to {url} failed with error {e}. 
Retrying with different url and port.\")\n return False", "def check():\n req = requests.post('https://net.tsinghua.edu.cn/do_login.php',\n {'action': 'check_online'})\n print(req.text)\n if req.text != 'not_online':\n req = requests.post('https://net.tsinghua.edu.cn/rad_user_info.php')\n info = req.text.split(',')\n traffic = int(info[6]) / 1000000000\n timelen = int(info[2]) - int(info[1])\n timelen_str = '{}:{}:{}'.format(\n timelen // 3600,\n timelen // 60 % 60,\n timelen % 60)\n info_s = 'ip={0[8]},user={0[0]},traffic={1:.2f}GB,timelen={2}'\n info_s = info_s.format(info, traffic, timelen_str)\n print(info_s)", "def _check_connect(self) -> bool:\n\n if (self._conn is None):\n if (self._exception):\n raise base_connection.ConnectException(\n \"No connection established\")\n\n else:\n return False\n\n return True", "def isReachable(self):\n cmd = \"ping -c 1 %s\" % self.ip\n ping_output = commands.getoutput(cmd)\n logger.debug(cmd)\n logger.debug(ping_output)\n return re.search(\"1[\\s\\w]+received\", ping_output) is not None", "def verify_get_response(self, status):\n validate(status, STATUS)\n self.assertTrue(status['database_connection']['connected'])\n self.assertTrue(status['redis_connection']['connected'])\n self.assertEqual(status['missing_workers'], [])\n self.assertNotEqual(status['online_workers'], [])\n self.assertNotEqual(status['versions'], [])", "def is_connected():\r\n global connection\r\n if connection is None:\r\n return False\r\n else:\r\n return True", "def _verify_http_connection(self, ssh_client, ssh_server,\n test_ip, test_port, servers, should_pass=True):\n utils.kill_nc_process(ssh_server)\n url = 'http://%s:%d' % (test_ip, test_port)\n utils.spawn_http_server(ssh_server, port=test_port, message='foo_ok')\n utils.process_is_running(ssh_server, 'nc')\n try:\n ret = utils.call_url_remote(ssh_client, url)\n if should_pass:\n self.assertIn('foo_ok', ret)\n return\n self.assertNotIn('foo_ok', ret)\n except Exception as e:\n if not should_pass:\n return\n self._log_console_output(servers)\n self._log_local_network_status()\n raise e", "def connected_internet() -> bool:\n url = \"http://www.google.com\"\n timeout = 5\n try:\n requests.get(url, timeout=timeout)\n return True\n except (requests.ConnectionError, requests.Timeout):\n print(\"\\nError: No internet connection!\\n\")\n return False", "def check_remote_pairing(ignore_errors):\n try:\n DeviceApi().get()\n return True\n except HTTPError as e:\n if e.response.status_code == 401:\n return False\n error = e\n except Exception as e:\n error = e\n\n LOG.warning('Could not get device info: {}'.format(repr(error)))\n\n if ignore_errors:\n return False\n\n if isinstance(error, HTTPError):\n if connected():\n raise BackendDown from error\n else:\n raise InternetDown from error\n else:\n raise error", "async def is_server_ready(self, headers: dict[str, t.Any] = ...) 
-> bool:", "def test_http_request(self):\n\n response = requests.get(self.live_server_url)\n assert response.status_code == 200", "def check_internet_connection(self):\n while not has_internet():\n time.sleep(5)\n self._logger.info('Internet connection is enabled')", "def is_ready(self, addr: int, /) -> bool:", "def is_alive(self):\n conn = HTTPConnection(self.browser.host, self.browser.port)\n conn.request(\"HEAD\", \"/invalid\")\n res = conn.getresponse()\n return res.status == 404", "def verify_connection(self, datasource):\n url = urljoin(self.base_url, \"Datasources\")\n params = {\"service\": \"ProcessData\", \"allQuotes\": 1}\n if not self.session.verify:\n import urllib3\n\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n res = self.session.get(url, params=params)\n if res.status_code != 200:\n raise ConnectionError\n j = res.json()\n for item in j[\"data\"]:\n if item[\"n\"] == datasource:\n return True\n return False", "def check_connection():\n if connected():\n ws.emit(Message('mycroft.internet.connected'))\n # check for pairing, if not automatically start pairing\n if not is_paired():\n # begin the process\n payload = {\n 'utterances': [\"pair my device\"],\n 'lang': \"en-us\"\n }\n ws.emit(Message(\"recognizer_loop:utterance\", payload))\n else:\n thread = Timer(1, check_connection)\n thread.daemon = True\n thread.start()", "def check(self, target, port):\n pass", "def check_connect():\n arg_parser = resilient.ArgumentParser(resilient.get_config_file())\n host = arg_parser.getopt(\"resilient\", \"host\")\n #\n # Use Openssl first\n #\n print(\"-------------------------------------\")\n print(\"Using openssl to connect to resilient\")\n print(\"-------------------------------------\")\n command = \"openssl s_client -connect {}:443\".format(host)\n user = arg_parser.getopt(\"resilient\", \"email\")\n password = arg_parser.getopt(\"resilient\", \"password\")\n process = subprocess.Popen(\"/bin/bash\", stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n out, err = process.communicate(command)\n cafile = arg_parser.getopt(\"resilient\", \"cafile\")\n verify = True\n if cafile is not None and cafile == \"false\":\n verify = False\n print(out)\n if err is not None:\n print(err)\n\n print(\"---------------------------------------------\")\n print(\"Using python requests to connect to resilient\")\n print(\"---------------------------------------------\")\n\n rest_url = \"https://{}:443/rest/session\".format(host)\n data = '{\"email\": \"' + user + '\",\"password\":\"' + password + '\", \"interactive\": true}'\n try:\n header = {\"Content-Type\": \"application/json\"}\n resp = requests.post(rest_url,\n data=data,\n headers=header,\n verify=verify)\n print(\"\\tResponse: \" + str(resp))\n\n except Exception as e:\n print(\"\\tConnection failed!!\")\n print(\"\\t\" + str(e))", "def check_connection(connected, config):\n\ttry:\n\t\turllib.request.urlopen(\"https://www.google.co.uk\").close()\n\texcept urllib.error.URLError:\n\t\tif connected is True:\n\t\t\tconnected = False\n\t\t\ton_disconnected()\n\t\t\tif config[\"closeOnDisconnect\"] is True or config[\"closeOnChange\"] is True:\n\t\t\t\tsys.exit()\n\telse:\n\t\tif connected is False:\n\t\t\tconnected = True\n\t\t\ton_connected()\n\t\t\tif config[\"closeOnConnect\"] is True or config[\"closeOnChange\"] is True:\n\t\t\t\tsys.exit()\n\treturn connected", "def test_connection():\n result = run(\"uname -a\")\n if result.failed:\n _pretty_output(\"Could not connect to remote server. 
Please check your configuration\")\n abort(\"Cannot continue. Aborting...\")", "def _check_connection(self, check_db=True) -> None:\n if not self._connected:\n raise InterfaceError(\"Client is not connected to a TerminusDB server.\")\n if check_db and self._db is None:\n raise InterfaceError(\n \"No database is connected. Please either connect to a database or create a new database.\"\n )", "def request_server_address(self, connection):\n address_request = {'type':'sident_verify',\n 'timestamp':calendar.timegm(time.gmtime())}\n self._send_queue.put((address_request, connection))\n return True", "def ready(self):\n if not self.running:\n return False\n\n try:\n response = requests.get(\n 'http://{host}:{port}/'.format(\n host=self.running_host,\n port=self.running_port\n )\n )\n except requests.ConnectionError:\n return False\n\n if response.status_code == 200:\n return True\n elif response.status_code >= 500:\n return False\n else:\n self.logger.warning('Unexpected error code from {}: {}'.format(self.image, response.status_code))\n return True", "def m_apiInstance_ConnectionStatusUpdate(self, sender, e):\r\n if e.Status.IsSuccess:\r\n # Add code here to begin working with the TT API\r\n # lookup an instrument\r\n self.m_req = ttapi.InstrumentLookupSubscription(self.m_apiInstance.Session, ttapi.Dispatcher.Current, ttapi.ProductKey(ttapi.MarketKey.Cme, ttapi.ProductType.Future, \"6J\"), \"Dec17\")\r\n self.m_req.Update += self.m_req_Update\r\n print(\"Connection Success!\")\r\n self.m_req.Start()\r\n else:\r\n print(\"TT Login failed: {0}\".format(e.Status.StatusMessage))\r\n self.Dispose()", "def isAlive(self):\r\n # Just use connectionInit, that is our internal variable\r\n return self.connectionInit", "def Connection(self):\n try:\n system(\n f'netsh advfirewall firewall add rule name=\"Open Port {self.PORT}\" dir=in action=allow protocol=TCP localport={self.PORT} remoteip={self.HOST}')\n with socket() as s: # Create a socket object\n print('Server started!')\n print('Waiting for clients...')\n s.bind((self.HOST, self.PORT)) # Bind to the port\n s.listen(5) # Now wait for client connection.\n self.c, addr = s.accept() # Establish connection with client.\n # Remote client machine connection\n print('Got connection from', addr)\n except error as strerror:\n print(\"Network problems:\", strerror)\n return 0\n return 1", "def is_connected(self):\n if self.server: return True\n return False", "def is_connected(cls,socket):\n pass" ]
[ "0.74695706", "0.7439458", "0.7408965", "0.7146001", "0.7071619", "0.6893049", "0.67745674", "0.6694355", "0.6692175", "0.66580856", "0.6639965", "0.6628134", "0.66238683", "0.6612052", "0.6602828", "0.6601591", "0.65890414", "0.65394765", "0.65368456", "0.6516861", "0.6512574", "0.6506276", "0.6504997", "0.6494682", "0.64894825", "0.64670616", "0.63823855", "0.63645166", "0.63421875", "0.63110524", "0.6286643", "0.62862104", "0.6285366", "0.6263496", "0.6244468", "0.62413746", "0.62246996", "0.62195265", "0.62141675", "0.62141675", "0.62139034", "0.6203254", "0.6187092", "0.6183537", "0.61825114", "0.6182162", "0.6118695", "0.6117179", "0.6106427", "0.60994214", "0.6078848", "0.6063552", "0.6061019", "0.60593116", "0.6053825", "0.60474473", "0.60364646", "0.60346353", "0.6031907", "0.60270077", "0.6024015", "0.60046256", "0.599196", "0.5990864", "0.5986756", "0.59726685", "0.5952868", "0.5947045", "0.5931979", "0.5930812", "0.5924958", "0.5902311", "0.5899394", "0.5896547", "0.5893732", "0.58899647", "0.58893204", "0.5885483", "0.5884683", "0.58818173", "0.5865725", "0.5865594", "0.5862747", "0.5854831", "0.58500063", "0.5840267", "0.58374655", "0.5834914", "0.582434", "0.5821433", "0.58205354", "0.58201647", "0.5817001", "0.581639", "0.5815289", "0.58043444", "0.5804188", "0.5802011", "0.5792943", "0.5792234", "0.578745" ]
0.0
-1
probably the worst way to parse this captcha
def get_captcha_reply(captcha):
    def get_char_at(pos, captcha):
        char_chars = [line[pos-1:pos] for line in captcha.split(b'\n')]
        key = ''.join([ str(s, 'ascii') for s in char_chars])
        if key == ' | ':
            return get_char_at(pos+2, captcha)
        if key == ' | .\\ ':
            return get_char_at(pos+2, captcha)
        return chars[key]

    pos = 1
    a, size = get_char_at(pos, captcha)
    pos += size
    pwn.log.info("a=%d" % a)
    op, size = get_char_at(pos, captcha)
    pos += size
    pwn.log.info('op=%s' % op)
    b, size = get_char_at(pos, captcha)
    pos += size
    pwn.log.info('b=%d' % b)
    if op == '-':
        return a - b
    if op == '*':
        return a * b
    if op == '/':
        return a / b
    if op == '+':
        return a + b
    pwn.log.error("Ops not found (%s)" % op)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_captcha(self):\n res = self._limited_call(self._requests.get,\n constants.FA_ROOT + \"/captcha.jpg\")\n data = res.content\n return data", "def handle_captcha(self):\n self.webdriver.save_screenshot('./out/captcha.png')\n sleep(20)\n\n with open('./out/captcha', 'r') as f:\n try:\n self.webdriver.find_element_by_xpath(\"//input[@aria-label='Type the text you hear or see']\").send_keys(f.read())\n except:\n log.error('Captcha input failed. Possibly incorrect captcha?')\n raise\n\n self.webdriver.find_element_by_xpath('//*[@id=\"identifierNext\"]').click()\n sleep(4)\n\n self.webdriver.find_element_by_css_selector(\"input[type=password]\").send_keys(self.bot.getPassword())", "def get_captcha_challenge(http_body, \n captcha_base_url='http://www.google.com/accounts/'):\n contains_captcha_challenge = False\n captcha_parameters = {}\n for response_line in http_body.splitlines():\n if response_line.startswith('Error=CaptchaRequired'):\n contains_captcha_challenge = True\n elif response_line.startswith('CaptchaToken='):\n # Strip off the leading CaptchaToken=\n captcha_parameters['token'] = response_line[13:]\n elif response_line.startswith('CaptchaUrl='):\n captcha_parameters['url'] = '%s%s' % (captcha_base_url,\n response_line[11:])\n if contains_captcha_challenge:\n return captcha_parameters\n else:\n return None", "def parse_captcha_string(captcha_string: str):\n try:\n if captcha_string.find('?') != -1:\n captcha_string = captcha_string[:captcha_string.find('?')]\n list_digits = captcha_string.split('+')\n if list_digits[1] == '':\n return None\n if int(list_digits[1]) > 25:\n list_digits[1] = list_digits[1][0]\n\n except (ValueError, IndexError) as error:\n print('Cant recognize captcha')\n print(error)\n else:\n return int(list_digits[0]) + int(list_digits[1])", "def get_captcha_image(self, page_html) -> str:\n try:\n items = page_html.select('div[class=\"ddText\"]')\n result_items = re.findall(r'\\\"data:image.*\\\"', str(items[0]))\n result_items = str(result_items).replace(\"\\\"\", \"\")\n except Exception as e:\n raise e\n else:\n return result_items", "def extractCaptcha(x, y, nameInfix=None, debug=False):\n\n\tif nameInfix == None:\n\t\tcaptchaName = \"./captcha/captcha_\" + str(datetime.datetime.now().isoformat()) + \".png\"\n\telse:\n\t\tcaptchaName = \"./captcha/captcha_\" + str(nameInfix) + \".png\"\n\n\treturn extractScreenPart(x-50, y+5, 170, 60, name=captchaName, debug=debug)", "def _handle_verify_code(self):\n while True:\n # r = self.session.get(self._genimage_url.format(code=self.codestring))\n try:\n self.headers[\"Cookie\"] = \"__jsluid=%s; __jsl_clearance=%s; JSESSIONID=%s\" % (self._jsluid, self._jsl_clearance, self.jsessionid)\n vfcode_url = \"http://www.miitbeian.gov.cn/getVerifyCode?%s\" % random.randint(10, 90)\n logger.info(\"Downloading verification code pic: %s\", vfcode_url)\n request = urllib2.Request(vfcode_url,headers=self.headers)\n r = self.opener.open(request, timeout=20)\n s = r.read()\n for cookie in self.cookiejar:\n logger.info(\"Get Cookie step2: %s, %s\", cookie.name, cookie.value)\n if cookie.name == \"JSESSIONID\":\n self.jsessionid = cookie.value\n img_path = \"miitVerf/code.png\"\n with open(img_path, mode='wb') as fp:\n fp.write(s)\n fp.close()\n logger.info(\"Saved verification code to %s\", format(os.path.dirname(img_path)))\n break\n except Exception,e:\n logger.info(e)\n self.vcode = raw_input(\"Please input the captcha:\\n\")\n return self.vcode", "def decoding_the_captcha(captcha, l1=7):\n im = Image.open(captcha)\n im = 
im.convert(\"RGB\")\n p1 = im.load()\n\n # Filtering the black dots\n for x in range(im.size[0]):\n for y in range(im.size[1]):\n if (p1[x, y][0] < l1) and (p1[x, y][1] < l1) \\\n and (p1[x, y][2] < l1):\n p1[x, y] = (0x80, 0x80, 0x80, 255)\n\n im.save(\"output.png\")\n im.close()", "def captcha(self):\n notification.send_sms(message=message)\n notification.send_emails(emails=email, message=message)\n sleep(25)\n\n ### this code snippet is for reference only, not to be used ###\n # sleep(3)\n # captcha = self.driver.find_element_by_xpath('/html/body/div/iframe[0]')\n # self.driver.switch_to.frame(captcha)\n # captcha_loc = captcha.location\n # print(captcha_loc)\n # captcha_x = captcha_loc[\"x\"]\n # captcha_y = captcha_loc[\"y\"]\n # self.actions.tap_and_hold(captcha_x, captcha_y)\n # sleep(5)\n # self.actions.release(captcha_x, captcha_y)\n # self.search_input()", "def bypass_captcha(self, rps):\n viewstate_pattern = r\"id=\\\"__VIEWSTATE\\\".*\\\"(.*)\\\"\"\n viewstategenerator_pattern = r\"id=\\\"__VIEWSTATEGENERATOR\\\".*\\\"(.*)\\\"\"\n CAPTCHA_PATTERN = r\"id=\\\"ctl00_ContentPlaceHolder1_ctl00_lblCapcha\\\".*?>(.*?)<\\/span>\"\n viewstate = re.search(viewstate_pattern, rps)\n if viewstate:\n viewstate = viewstate.group(1)\n else:\n print(\"VIEWSTATE value not found!\")\n viewstategenerator = re.search(viewstategenerator_pattern, rps)\n if viewstategenerator:\n viewstategenerator = viewstategenerator.group(1)\n captcha = re.search(CAPTCHA_PATTERN, rps)\n if captcha:\n captcha_text = captcha.group(1)\n print(\"[*] CAPTCHA -> [{}]\".format(captcha_text))\n payload = {\n 'ctl00$ContentPlaceHolder1$ctl00$txtCaptcha':captcha_text,\n '__VIEWSTATE':viewstate,\n '__VIEWSTATEGENERATOR':viewstategenerator,\n '__EVENTARGUMENT':'',\n '__EVENTTARGET':'',\n 'ctl00$ContentPlaceHolder1$ctl00$btnXacNhan': 'Vào website'\n }\n rps = self.session.post(url = home_url, headers = BROWSER_HEADERS, data=payload)\n if CAPTCHA_ELEMENT_ID not in rps.text:\n print(\"[*] CAPTCHA BYPASSED\")\n return True\n else:\n print(\"CAPTCHA NOT BYPASSED! 
PLEASE REPORT TO DEVELOPER BACHVKHOA!\")\n else:\n print(\"[*] CAPTCHA NOT FOUND\")\n return False", "def parse(self, response):\n if self._has_captcha(response):\n result = self._handle_captcha(response, self.parse)\n else:\n result = super(AmazonBaseClass, self).parse(response)\n\n return result", "def get_sms_captcha(self, img_ts, img_captcha):\n url = \"http://api.applezhuan.com/api/get_sms_captcha?&\"\n params = {\n \"img_captcha\": img_captcha,\n \"time\": self.get_current_time,\n \"ts\": img_ts,\n \"device_code\": self.device_code,\n \"mobile\": self.mobile.mobile\n }\n params_str = self.encrypt.get_secret_param(params)\n url = url + \"s=\" + params_str\n headers = {\n \"Accept-Language\": \"zh-CN,zh;q=0.8\",\n \"User-Agent\": \"Mozilla/5.0 (Linux; U; Android \" + self.mobile.os + \"; zh-cn; GT-N7100 Build/\" +\n self.mobile.brand + \") AppleWebKit/534.30 (KHTML, like Gecko) \"\n \"Version/4.0 Mobile Safari/534.30\",\n \"Host\": \"api.applezhuan.com\",\n \"Connection\": \"Keep-Alive\",\n \"Accept-Encoding\": \"gzip\",\n \"Cookie\": self.cookie\n }\n\n res = requests.get(url, headers=headers)\n # print(res.text)\n result = json.loads(res.text)\n return result", "def _hackBotchedCard(self, card, res):\n\t\tmat = re.match(r\"([^\\s=]*)\\s*=\\s*([^/]+)\", card.cardimage)\n\t\tif mat:\n\t\t\tres[mat.group(1)] = mat.group(2).strip()\n\t\telse: # Card beyond recognition, ignore\n\t\t\tpass", "def solve_image_captcha(self, captcha_tmp_path):\n # Get solution and apply it\n for i in range(1, 4):\n print(f\"Attempt #{i} for recaptcha solution\")\n solution = self.obtain_image_captcha(captcha_tmp_path)\n print(f'this {solution}')\n if solution and ERROR not in solution.upper():\n break\n\n if solution is None or ERROR in solution.upper():\n if not solution:\n message = f\"2Captcha service didn't return a response for the captcha\"\n else:\n message = f\"Error in captcha solution from 2Captcha: {solution}\"\n return None\n\n print(\"Captcha solution: {}\".format(solution))\n return solution", "def handle_captcha(thread_call, thread_r):\n import subprocess\n\n iden = thread_r['captcha']\n\n subprocess.call(['open', reddit_url + 'captcha/' + iden])\n thread_call['captcha'] = input(\"Captcha (enclose in quotes):\")\n thread_call['iden'] = iden\n\n request = session.post(reddit_url + 'api/submit', data=thread_call, cookies=cookie)\n thread_r = request.json()['json']['data']\n print request.json()\n if len(thread_r['errors']) > 0:\n debug_printer.pprint(thread_r)", "def solve_captcha(self):\n # Switch to the Captcha's iframe\n captcha = CapatchaSolver(self.driver)\n while True:\n self.driver.switch_to.frame(self.driver.find_element_by_tag_name(\"iframe\"))\n captcha.solve_captcha()\n # Check if we passed the captcha part by checking the page title\n wait = WebDriverWait(self.driver, 10)\n try:\n wait.until_not(EC.title_is(consts.BLOCKED))\n break\n except TimeoutException:\n self.driver.refresh()", "def solve_captcha_manual(gid):\n image = auth.get_captcha_image(gid)\n # FIXME: Use Python's temp file interface.\n image.save(\"./test.png\")\n webbrowser.open_new_tab(\"./test.png\")\n text = input('solve_captcha --->')\n return text", "def handle_verify_code(self, code):\n r = self.session.get(self.image_url_format.format(code=code))\n\n # FIXME use terminal better\n img_path = os.path.expanduser('~/') + 'pansh.{}.vcode.png'.format(hash(self.username))\n with open(img_path, mode='wb') as fp:\n fp.write(r.content)\n print(\"Saved verification code to {}\".format(os.path.dirname(img_path)))\n vcode = 
raw_input(\"Please input the captcha:\\n\")\n return vcode", "def get_image_response(self, captcha_id):\n url = 'http://2captcha.com/res.php'\n data = {'key': self.api_key, 'action': 'get',\n 'id': captcha_id, 'json': 1}\n response = self.session.post(url, data=data)\n json_response = json.loads(response.text)\n recaptcha_answer = json_response[\"request\"]\n finished = False\n for _ in range(20): # For making up to 120 seconds of waits\n if 'CAPCHA_NOT_READY' not in response.text:\n finished = True\n break\n # Time Requested by the web page\n sleep(6)\n response = self.session.post(url, data=data)\n json_response = json.loads(response.text)\n recaptcha_answer = json_response[\"request\"]\n\n if not finished:\n return False\n\n return recaptcha_answer", "def writerep_general(contact_link, i):\n\n b = browser.Browser()\n print \"In writerep_general, opening contact_link\", contact_link\n b.open(contact_link)\n\n def get_challenge():\n ''' find captchas'''\n labels = b.find_nodes('label', lambda x: x.get('for') == 'HIP_response')\n if labels: return labels[0].string\n \n def fill_inhofe_lgraham(f):\n \"\"\"special function to fill in forms for inhofe and lgraham\"\"\"\n if DEBUG: print \"Filling special inhofe or lgraham form\"\n f.fill_all(A01=i.prefix, B01=i.fname, C01=i.lname, D01=i.addr1, E01=i.addr2, F01=i.city,\n G01=i.state, H01=i.zip5, H02=i.phone, H03=i.phone, I01=i.email, J01=\"Communications\", K01=i.full_msg)\n f.fill(type='textarea', value=i.full_msg)\n if DEBUG: print \"f filled and ready to submit: \", f\n \n def fill_form(f):\n ''' f is a form '''\n\n f.fill_name(i.prefix, i.fname, i.lname)\n if DEBUG: print \"in fill_form, filling addr\"\n f.fill_address(i.addr1, i.addr2)\n if DEBUG: print \"in fill_form, filling phone\"\n f.fill_phone(i.phone)\n if DEBUG: print \"in fill_form, filling textarea\"\n textareacontrol = f.fill(type='textarea', value=i.full_msg)\n if DEBUG: print 'filled textareacontrol' , textareacontrol\n if DEBUG: print \"in fill_form, filling all\"\n\n if DEBUG: print \"Printing all controls\"\n for c in f.controls:\n if DEBUG: print \"control: \", c.name, \" type: \", c.type\n \n f.fill_all(city=i.city, zipcode=i.zip5, zip4=i.zip4, state=i.state.upper(),\n email=i.email,\n issue=['TECH', 'GEN', 'OTH'],\n subject=i.subject, reply='yes',\n Re='issue', #for billnelson\n newsletter='noAction', aff1='Unsubscribe',\n MessageType=\"Express an opinion or share your views with me\")\n\n # page has one required control that has no name. so we need to fill it in\n if (i.dist == 'SD-00' or 'coburn' in b.url):\n empty_controls = [c for c in f.controls if not c.value]\n for c in empty_controls:\n if DEBUG: print f.fill('OTH', control=c)\n\n \n\n\n # Solve captchas. I included this here because it was placed here by Aaron,\n # but I haven't found a captcha that it works on. -NKF\n challenge = get_challenge()\n if challenge:\n print \"Found challenge!\"\n try:\n solution = captchasolver.solve(challenge)\n except Exception, detail:\n print >> sys.stderr, 'Exception in CaptchaSolve', detail\n print >> sys.stderr, 'Could not solve:\"%s\"' % challenge,\n \n if DEBUG: print \"f filled and ready to submit to \", b.url, \"\\n\", f\n #return b.open(f.click())\n \n \n\n # max loops\n k = 6\n\n # needed this from some weird error that I forgot to document.\n # we only want to do the WYR form once,\n # so it's a flag so we don't choose this one again. 
\n completedWyrForm = False\n for cnt in range(1,k):\n # todo, place newurl into cache\n if DEBUG: print \"Loop \", cnt, \":\\n\", b.url, \"\\n\" #, b.page, \"\\n Done with page \", cnt, \"\\n\\n\"\n\n # check if this is a refresh page\n # to do: see if we can get javascript window.location refreshes\n # (would require some smart parsing or using a javascript interpreter module)\n if 'http-equiv=\"refresh\"' in b.page:\n if DEBUG: print \"Redirect to a new page:\"\n newurl = r_refresh.findall(b.page)[0]\n newurl = newurl.replace(' ', '%20')\n newurl = newurl.replace('&amp;', '&')\n if DEBUG: print \"\\nNewurl:\", newurl\n try:\n b.open(newurl)\n continue #next loop\n except:\n print \"Failed to open url \", newurl, \" error: \", traceback.print_exc()\n\n # some pages have multiple forms on them.\n # For example, there may be a search tool in the sidebar.\n # or there may be forms which are hidden by not displayed by the css.\n # try to see what we can grab out the page, then we'll decide which one's the best to try\n textareaform = get_form(b, lambda f: f.find_control_by_type('textarea'))\n zipform = get_form(b, lambda f: f.has(name='zip'))\n verificationform = get_form(b, lambda f: 'formproc' in f.action)\n nameform = get_form(b, lambda f: 'wrep_const' in f.action) #see AL-06 for an example, has zip form in page too\n wyrform = get_form(b, lambda f: f.find_control_by_id('state') and f.find_control_by_name('zip') and f.find_control_by_name('zip4')) #get_form(b, not_signup_or_search)\n indexform = get_form(b, lambda f: f.has(name='Re')) # see billnelson for example\n\n #choose which form we want to use\n form = None\n if textareaform:\n if DEBUG: print \"textareaform\"\n form = textareaform\n elif wyrform and not completedWyrForm:\n if DEBUG: print \"wyrform\"\n form = wyrform\n completedWyrForm = True\n elif nameform:\n if DEBUG: print \"step2 contact form with name\"\n form = nameform\n elif zipform:\n if DEBUG: print \"zipform\"\n form = zipform\n elif verificationform:\n if DEBUG: print \"verification form\"\n form = verificationform\n elif indexform:\n if DEBUG: print \"index form\"\n form = indexform\n\n #if no redirect and no form was found, just return. 
can go no further\n if not form:\n return b.page\n \n \n #to do, add back in captcha solver\n if form.find_control_by_name('captcha') or form.find_control_by_name('validation'):\n if DEBUG: print \"captcha found\"\n #raise Captcha\n return b.page\n else:\n if DEBUG: print \"no captcha found\"\n\n #try:\n if DEBUG: print \"going to fill_form from \", b.url, \" now \\n\", form, \"\\n End form\", cnt, \"\\n\"\n if \"inhofe\" in contact_link or \"lgraham\" in contact_link:\n fill_inhofe_lgraham(form)\n else:\n fill_form(form) #, aggressive=True)\n\n try:\n nextpage = b.open(form.click())\n except:\n print \"caught an http error\"\n print \"Failed to submit form for url \", b.url, \" error: \", traceback.print_exc()\n return \"Failed to submit form for url \"+ b.url+ \" error: \"+ traceback.format_exc()\n\n \n # Now, look for common errors or confirmations.\n foundError = False\n thanked = False\n if DEBUG: print \"Looking for errors in page \" #, b.page\n \n errorStr = getError(b.page)\n if errorStr:\n if DEBUG: print \"Found error: \", errorStr, \" done with \", contact_link\n foundError = True\n\n if DEBUG: print \"Looking for thank you in page: \"# , nextpage.lower()\n confirmations=[cstr for cstr in confirmationStrings if cstr in nextpage.lower()]\n\n if len(confirmations) > 0:\n print 'thanked, done with ', contact_link\n thanked = True\n\n successUrls = ['https://mulvaneyforms.house.gov/submit-contact.aspx']\n if b.url in successUrls:\n thanked = True\n\n if thanked or foundError:\n return nextpage\n\n if DEBUG: print \"Tried \", k, \"times, unsuccessfully, to fill form\"\n return b.page\n #raise UnsuccessfulAfter5Attempts(b.page) ", "def receive_capturing_validation(self):\n reply = self.socket.recv(1)\n if reply[0] == codes['timeout']:\n print(\"Ocurrió un timeout en la conexión\")\n self.close_connection()\n if bytes_to_int(reply) == codes['already_have_all']:\n print(\"Ya tenías todos los pokémones. Has completado el juego.\")\n self.receive_session_termination()\n\n elif bytes_to_int(reply) == codes['already_have_pokemon']:\n print(\"Ya tienes el pokémon sugerido. Intentaré encontrarte otro.\")\n self.receive_pokemon_suggestion()\n\n elif bytes_to_int(reply) == codes['do_not_have_pokemon']:\n print(\"Tu pokédex no reconoce a este pokémon. Intenta capturarlo!\")\n captured = False\n while not captured:\n captured = self.verify_capture()\n if captured:\n break\n again = \"\"\n while again != \"y\" and again != \"n\":\n again = input(\"Quieres tratar de nuevo? 
(y/n): \")\n if again == \"n\":\n self.socket.sendall(pack('B', codes['no']))\n self.receive_session_termination()\n elif again == \"y\":\n self.socket.sendall(pack('B', codes['yes']))\n if captured:\n print(\"Lo capturaste\")\n self.receive_image()\n self.receive_session_termination()", "def getTasseledCap(img):", "def corp_image(self):\n try:\n # Open image\n image_to_crop = Image.open(self.captcha_image_filename, 'r')\n # Crop image\n image = image_to_crop.crop((-1, 8, 65, 22))\n # Save image\n image.save(self.cropped_captcha_filename)\n except UnidentifiedImageError as error:\n raise(error)", "def split_dotted_f(captcha):\n # Cropping captcha so that the first letter (f) is not included\n image = captcha[19:46, 36:]\n \n col_sum = np.sum(image, axis = 0)\n col_sum_list = list(col_sum)\n\n # Finding all the dark regions\n # beggining and end of all dark regions)\n x = 1\n i = 0\n dark_regions = []\n while i < 164:\n if col_sum_list[i] == 0:\n dark_region_beg = i\n while col_sum_list[i + x] == 0:\n x = x + 1\n if (x + i) > 163:\n break\n dark_region_end = i + x - 1\n dark_region = (dark_region_beg, dark_region_end)\n dark_regions.append(dark_region)\n i = x + i + 1\n x = 1\n else:\n i = i + 1\n\n # Identifying leftmost and rightmost dark regions and popping them out of the list\n left_region = dark_regions[0]\n right_region = dark_regions[-1]\n dark_regions.pop(0)\n dark_regions.pop(-1)\n\n # Sorting dark regions according to their length\n four_regions = sorted(dark_regions, key = lambda x: x[1] - x[0], reverse = True)\n\n gaps = []\n lines = []\n for i, region in enumerate(four_regions):\n gap = mt.ceil((region[1] - region[0]) / 2)\n if gap == 0:\n continue\n gaps.append(gap)\n lines.append(region[0] + gap)\n\n # If more than 4 remaining gaps are identified, the problem may be due to split letters\n # Some of the troublesome letters are m, n and h\n # We will try to fix this issue by completing gaps in these letters\n if len(lines) > 4:\n\n for i in range(len(col_sum_list[:-9])):\n if col_sum_list[i:i+9] == [0, 0, 0, 0, 510, 510, 0, 3060, 3060]:\n captcha[28:30, i+1:i+3] = 255\n if col_sum_list[i:i+9] == [0, 0, 0, 0, 510, 510, 0, 2550, 2550]:\n captcha[31:33, i+1:i+3] = 255\n if col_sum_list[i:i+9] == [0, 3060, 3060, 0, 510, 510, 0, 0, 0, 0]:\n captcha[28:30, i+7:i+9] = 255\n if col_sum_list[i:i+9] == [0, 2550, 2550, 0, 510, 510, 0, 0, 0, 0]:\n captcha[31:33, i+7:i+9] = 255\n if col_sum_list[i:i+9] == [0, 4080, 4080, 0, 0, 0, 0, 510, 510]:\n captcha[31:33, i+4:i+6] = 255\n\n # Reloading image (based on modified captcha) and redefiding col_sum_list\n image = captcha[19:46, 36:]\n col_sum_list = list(np.sum(image, axis = 0))\n\n # Finding all the dark regions\n # beggining and end of all dark regions)\n x = 1\n i = 0\n dark_regions = []\n while i < 164:\n if col_sum_list[i] == 0:\n dark_region_beg = i\n while col_sum_list[i + x] == 0:\n x = x + 1\n if (x + i) > 163:\n break\n dark_region_end = i + x - 1\n dark_region = (dark_region_beg, dark_region_end)\n dark_regions.append(dark_region)\n i = x + i + 1\n x = 1\n else:\n i = i + 1\n\n # Identifying leftmost and rightmost dark regions and popping them out of the list\n left_region = dark_regions[0]\n right_region = dark_regions[-1]\n dark_regions.pop(0)\n dark_regions.pop(-1)\n\n # Sorting dark regions according to their length\n four_regions = sorted(dark_regions, key = lambda x: x[1] - x[0], reverse = True)\n\n # Building a list of GAPS (lengths of the dark regions)\n # and LINES that split such gaps in half\n gaps = []\n lines = []\n 
for i, region in enumerate(four_regions):\n gap = mt.ceil((region[1] - region[0]) / 2)\n if gap == 0:\n continue\n gaps.append(gap)\n lines.append(region[0] + gap)\n\n # If the errors persists, we move on to next captcha\n if len(lines) > 4:\n return('error')\n\n # If the algorithm finds less letters than expected (merged letters), we move on to next captcha\n if len(lines) < 4:\n return('error')\n\n # Defining rightmost and leftmost lines, appending lines list, and sorting\n left_line = 0\n right_line = right_region[0] + 2\n lines.append(left_line)\n lines.append(right_line)\n lines = sorted(lines)\n\n # Adjusting coordinates to account for deleting first letter\n lines = list(map(lambda x: x + 36, lines))\n\n # Finding letters x-coordinates (coordinates for initial r are already included)\n letters_xcoords = [(26, 37)]\n for i in range(len(lines)):\n if lines[i] == lines[-1]:\n break\n letter = (lines[i], lines[i + 1])\n letters_xcoords.append(letter)\n\n # Finding letters in the captcha, using the x-coordinates\n letters = []\n for i, letter in enumerate(letters_xcoords):\n letter_image = captcha[:60, letter[0]:letter[1]]\n letters.append(letter_image)\n\n return(letters)", "def captcha_validation(token: str):\n url = \"https://www.google.com/recaptcha/api/siteverify\"\n secret = json.loads(get_secret(\"CAPTCHA_SECRET\"))['CAPTCHA_SECRET']\n payload = {\n \"secret\": secret,\n \"response\": token\n }\n response_raw = requests.post(url, data=payload)\n response_text = response_raw.text\n logger.debug(response_text)\n response = json.loads(response_text)\n return response['success']", "def generate_challenge(self):\n return None", "def _validate_captcha(data):\n settings = api.config.get_settings()[\"captcha\"]\n\n post_data = urllib.parse.urlencode(\n {\n \"secret\": settings[\"reCAPTCHA_private_key\"],\n \"response\": data[\"g-recaptcha-response\"],\n \"remoteip\": flask.request.remote_addr,\n }\n ).encode(\"utf-8\")\n\n request = urllib.request.Request(settings[\"captcha_url\"], post_data, method=\"POST\")\n response = urllib.request.urlopen(request).read().decode(\"utf-8\")\n parsed_response = json.loads(response)\n return parsed_response[\"success\"] is True", "def parse_kiss(self):\n frame_len = len(self.frame)\n\n if frame_len < 16:\n self._logger.debug('Frame len(%s) < 16, Exiting.', frame_len)\n return\n\n for raw_slice in range(0, frame_len):\n\n # Is address field length correct?\n # Find the first ODD Byte followed by the next boundary:\n if (ord(self.frame[raw_slice]) & 0x01\n and ((raw_slice + 1) % 7) == 0):\n\n i = (raw_slice + 1) / 7\n\n # Less than 2 callsigns?\n if 1 < i < 11:\n # For frames <= 70 bytes\n if frame_len >= raw_slice + 2:\n if (ord(self.frame[raw_slice + 1]) & 0x03 == 0x03 and\n ord(self.frame[raw_slice + 2]) in\n [0xf0, 0xcf]):\n self._extract_kiss_text(raw_slice)\n self._extract_kiss_destination()\n self._extract_kiss_source()\n self._extract_kiss_path(i)", "def ocr_correction(token):", "def obtain_image_captcha(self, file_path):\n id_answer = self.post_image_task(file_path)\n if not id_answer:\n message = f\"Unable to obtain response for request of captcha from 2Captcha\"\n print(message)\n return None\n\n try:\n captcha_id = int(id_answer)\n except ValueError:\n message = f\"Error in captcha request from 2Captcha: {id_answer}\"\n print(message)\n return None\n\n recaptcha_answer = self.get_image_response(captcha_id)\n if not recaptcha_answer:\n message = f\"Unable to obtain response for captcha image solution from 2Captcha\"\n print(message)\n return 
None\n\n print(f\"Output from 2Captcha {recaptcha_answer}\")\n return recaptcha_answer", "def twocaptcha_solver():\n SITE_URL = get_site_settings()[1]\n SITE_KEY = get_site_settings()[0] # osrs site key\n API_KEY = get_user_settings()[2] # api key read from settings.ini\n if not API_KEY:\n raise ValueError(\"No API key was found in settings.ini.\")\n\n s = requests.Session()\n\n # here we post and parse site key to 2captcha to get captcha ID\n try:\n captcha_id = s.post(f\"http://2captcha.com/in.php?key={API_KEY}\"\n f\"&method=userrecaptcha&googlekey={SITE_KEY}\"\n f\"&pageurl={SITE_URL}\").text.split('|')[1]\n except IndexError:\n print(\"You likely don't have a valid 2captcha.com API key with funds\"\n \" in your settings.ini file. Fix and re-run the program.\")\n\n # then we parse gresponse from 2captcha response\n recaptcha_answer = s.get(\n f\"http://2captcha.com/res.php?key={API_KEY}\"\n f\"&action=get&id={captcha_id}\").text\n print(\"Solving captcha...\")\n while 'CAPCHA_NOT_READY' in recaptcha_answer:\n sleep(6)\n recaptcha_answer = s.get(\n f\"http://2captcha.com/res.php?key={API_KEY}\"\n f\"&action=get&id={captcha_id}\").text\n try:\n recaptcha_answer = recaptcha_answer.split('|')[1]\n except IndexError:\n print(\"2captcha failed to solve this one.. Returning a blank response \"\n \"If the program fails to continue, please msg Gavin with error.\")\n recaptcha_answer = ''\n else:\n return recaptcha_answer", "def get(self):\n try:\n imageFilename = random.choice(os.listdir(self.cacheDir))\n imagePath = os.path.join(self.cacheDir, imageFilename)\n with open(imagePath) as imageFile:\n self.image = imageFile.read()\n except IndexError:\n raise GimpCaptchaError(\"CAPTCHA cache dir appears empty: %r\"\n % self.cacheDir)\n except (OSError, IOError):\n raise GimpCaptchaError(\"Could not read Gimp captcha image file: %r\"\n % imageFilename)\n\n self.answer = imageFilename.rsplit(os.path.extsep, 1)[0]\n self.challenge = self.createChallenge(self.answer)\n\n return (self.image, self.challenge)", "def get_captcha_key(self, captcha_image_url):\n\n if self.interactive:\n print('Open CAPTCHA image url in your browser and enter it below: ',\n captcha_image_url)\n captcha_key = raw_input('Enter CAPTCHA key: ')\n return captcha_key\n else:\n raise VkAuthError(\n 'Captcha is required. 
Use interactive mode to enter it '\n 'manually')", "def askForCaptcha(self, url):\n try:\n import webbrowser\n wikipedia.output(u'Opening CAPTCHA in your web browser...')\n if webbrowser.open(url):\n return wikipedia.input(\n u'What is the solution of the CAPTCHA that is shown in '\n u'your web browser?')\n else:\n raise\n except:\n wikipedia.output(u'Error in opening web browser: %s'\n % sys.exc_info()[0])\n wikipedia.output(\n u'Please copy this url to your web browser and open it:\\n %s'\n % url)\n return wikipedia.input(\n u'What is the solution of the CAPTCHA at this url ?')", "def extract_critic_input(self, data):\n return data[1]", "async def enter_captcha(self, url, sid):\n raise VkCaptchaNeeded(url, sid)", "def __init__(self, anticaptcha_key, gb=True):\n self.solver = hCaptchaProxyless()\n self.solver.set_key(anticaptcha_key)\n self.solver.set_website_url(\"https://2ch.hk/\")\n # self.solver.set_website_url(\"https://2ch.pm/\")\n # self.solver.set_verbose(1) # debug\n self.solver.set_website_key(\"248cebfd-9b3f-4d8c-88b5-f812daf51261\") # 2ch google captcha site key\n\n if gb:\n self.get_balance()", "def controls(email):", "def init_home_page(self):\n rps = self.session.get(home_url, headers = BROWSER_HEADERS)\n # with open('first_get.html', 'w') as f: f.write(rps.text)\n if CAPTCHA_ELEMENT_ID in rps.text:\n # print(\"CAPTCHA ELEMENT DETECTED!\")\n return self.bypass_captcha(rps.text)\n else:\n print(\"NO CAPTCHA\")\n return True", "def split_dotted(captcha):\n if f_is_first(captcha):\n letters = split_dotted_f(captcha)\n else:\n letters = split_dotted_general(captcha)\n\n return(letters)", "def require_auth_captcha(self, response, query_params,\n login_form_data, http_session):\n logger.info('Captcha is needed. Query params: %s', query_params)\n form_text = response.text\n\n action_url = parse_form_action_url(form_text)\n logger.debug('form action url: %s', action_url)\n if not action_url:\n raise VkAuthError('Cannot find form action url')\n\n captcha_sid, captcha_url = parse_captcha_html(\n html=response.text, response_url=response.url)\n logger.info('Captcha url %s', captcha_url)\n\n login_form_data['captcha_sid'] = captcha_sid\n login_form_data['captcha_key'] = self.get_captcha_key(captcha_url)\n\n response = http_session.post(action_url, login_form_data)\n return response", "def captcha_to_text(self, imagepath):\n return pytesseract.image_to_string(Image.open(imagepath))", "def image_to_string(self):\n img = Image.open(self.cropped_captcha_filename)\n config = '--psm 10 --oem 1 -c tessedit_char_whitelist=0123456789+?'\n try:\n return pytesseract.image_to_string(img, config=config)\n except pytesseract.pytesseract.TesseractNotFoundError:\n raise(\"Tesseract не установлен!\")\n exit(-1)", "def ocr(H, I):\n dp = [[False for _ in range(len(I)+1)] for _ in range(len(H)+1)]\n dp[0][0] = True\n \n for i in range(1, len(dp[0])):\n if I[i-1] == '@':\n dp[0][i] = True \n \n for i in range(1, len(H)+1):\n for j in range(1, len(I)+1):\n \n if H[i-1] == I[j-1] or I[j-1] == '$':\n dp[i][j] = dp[i-1][j-1]\n \n elif I[j-1] == '@':\n dp[i][j] = dp[i-1][j] or dp[i][j-1]\n \n else:\n dp[i][j] = False\n \n return dp[-1][-1]", "def graphtextdetextor(image_path):\n img=cv2.imread(image_path)\n\n #img=image_filter.rotate_anticlockwise(img)\n\n\n custom_config_number=r'--oem 3 --psm 6 outputbase digits'\n custom_config=r'--oem 3 --psm 6'\n\n custom_config1=r'--oem 3 --psm 1'\n\n custom_config2=r'--oem 3 --psm 4'\n\n text=pytesseract.image_to_string(img,config=custom_config)\n 
text2=pytesseract.image_to_string(img,config=custom_config1)\n text3=pytesseract.image_to_string(img,config=custom_config2)\n\n\n\n d=pytesseract.image_to_data(img,config=custom_config,output_type=Output.DICT)\n\n #print(text3)\n return [text,text2,text3]", "def cipher_feedback(self):", "def test_5_signin(self):\n print \"获取验证码token\"\n r = requests.post(gl.url + ':7130/account/v1/get_captcha_token')\n print r, r.status_code, r.json()[\"captcha_token\"], r.json()[\"message\"], r.json()[\"code\"]\n self.assertEqual(r.status_code, 200)\n ##self.assertEqual(r.json()[\"message\"], \"操作成功\")\n self.assertEqual(r.json()[\"code\"], 0)\n gl.captcha_token = r.json()[\"captcha_token\"]\n self.assertIsNotNone(gl.captcha_token)\n\n print \"获取验证码\"\n r = requests.get(gl.url + ':7130/account/v1/get_captcha_image' + '?captcha_token=' + gl.captcha_token)\n print r, r.status_code, r.json()[\"captcha_value\"]\n self.assertEqual(r.status_code, 200)\n self.assertIsNotNone(r.json()[\"captcha_value\"])\n gl.captcha_value = r.json()[\"captcha_value\"]\n\n print \"发送验证码\"\n d = \"{\\\"purpose\\\": \\\"signin\\\", \\\"phone\\\": \\\"\"+gl.invitation_phoneNo+\"\\\", \\\"Source\\\": \\\"web\\\", \\\"captcha_token\\\":\\\"\" + gl.captcha_token + \"\\\",\\\"captcha_value\\\":\\\"\" + gl.captcha_value + \"\\\"}\"\n print \"传入参数:\" + d\n r = requests.post(gl.url + ':7130/account/v1/send_verify_code', data=d)\n print r, \"返回值:\" + r.text\n self.assertEqual(r.status_code, 200)\n gl.verify_code = r.json()[\"verify_code\"]\n self.assertIsNotNone(r.json()[\"verify_code\"])\n\n print \"验证码校验\"\n d = \"{\\\"purpose\\\": \\\"signin\\\", \\\"phone\\\": \\\"\"+gl.invitation_phoneNo+\"\\\",\\\"Source\\\": \\\"web\\\", \\\"verify_code\\\":\\\"\" + gl.verify_code + \"\\\"}\"\n print \"传入参数:\" + d\n r = requests.post(gl.url + ':7130/account/v1/check_verify_code', data=d)\n print r, \"返回值:\" + r.text\n self.assertEqual(r.status_code, 200)\n\n print \"登录\"\n d = \"{\\\"password\\\": \\\"\"+gl.invitation_pwd+\"\\\", \\\"phone\\\": \\\"\"+gl.invitation_phoneNo+\"\\\",\\\"Source\\\": \\\"web\\\", \\\"captcha_token\\\":\\\"\" + gl.captcha_token + \"\\\",\\\"captcha_value\\\":\\\"\" + gl.captcha_value + \"\\\"}\"\n print \"传入参数:\" + d\n r = requests.post(gl.url + ':7130/account/v1/sign_in', data=d)\n print r, \"返回值:\" + r.text\n self.assertEqual(r.status_code, 200)\n self.assertIsNotNone(r.json()[\"token\"])\n gl.account_token = r.json()[\"token\"]", "def gen_captcha(**kwargs):\n from PIL import ImageFile\n from PIL import Image\n from PIL import ImageFont\n from PIL import ImageDraw\n from PIL import ImageFilter\n import random\n from PIL import ImageFile as pyImageFile\n import sys\n sys.modules['ImageFile'] = pyImageFile\n from io import StringIO, BytesIO\n # CHAR_BIT=(4,5,6,7,8)\n # CHAR_TYPE=(1,2,3)\n #随机选择字符位数和类型.\n # text=getstr( random.choice(CHAR_BIT), random.choice(CHAR_TYPE))\n text = kwargs.get('text', None)\n fnt_sz = kwargs.get('size', DEFAULT_IMAGE_SIZE)\n bkground = kwargs.get('bkground', DEFAULT_BG)\n font_color = kwargs.get('font_color', DEFAULT_FONT_COLOR)\n distortion = kwargs.get('distortion', DEFAULT_DISTORTION)\n addWidth = kwargs.get('addWidth', None)\n addHeight = kwargs.get('addHeight', None)\n\n period = distortion[0]\n amplitude = distortion[1]\n offset = distortion[2]\n\n## outFile = StringIO()\n outFile = BytesIO()\n\n DATA_PATH = os.path.abspath(os.path.dirname(__file__))\n FONT_PATH = DATA_PATH + '/fonts'\n\n # select font for captcha\n ALL_FONTS = ('1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', 
'12')\n rand_font = random.choice(ALL_FONTS)\n \"\"\"font = ImageFont.truetype(FONT_PATH+'/font%s.ttf'%rand_font, fnt_sz)\"\"\"\n font = ImageFont.truetype(FONT_PATH + '/font' + rand_font + '.ttf', fnt_sz)\n\n #依据需求认定图片大小\n # textSize =[165,50]\n textSize = [kwargs.get('width', 165), kwargs.get('height', 50)]\n factTextSize = font.getsize(text)\n\n #如果定义尺寸小于实际尺寸则用实际的尺寸\n if factTextSize[0] > textSize[0]:\n textSize[0] = factTextSize[0]\n if factTextSize[1] > textSize[1]:\n textSize[1] = factTextSize[1]\n#------------------------------render background1 -----------------------\n image = Image.new(\n 'RGB', (textSize[0] + addWidth, textSize[1] + addHeight), bkground)\n image.paste(bkground)\n#------------------------------render Text2 ------------------------\n draw = ImageDraw.Draw(image)\n alignment = (random.uniform(0, 1), random.uniform(0, 1))\n x = int((image.size[0] - textSize[0]) * alignment[0] + 0.5)\n y = int((image.size[1] - textSize[1]) * alignment[1] + 0.5)\n\n draw.text((x, y), text, font=font, fill=font_color)\n#--------------new add line i值越大线越粗------------------------\n width, height = image.size\n for i in range(0, 3):\n draw.line(((0, height / 1 + i), (width, height / 8 + i)), fill=128)\n\n#------------------------------render Distortion -----------------------\n r = 1\n xPoints = image.size[0] //r + 2\n yPoints = image.size[1] //r + 2\n\n # Create a list of arrays with transformed points\n xRows = []\n yRows = []\n for j in range(yPoints):\n xRow = []\n yRow = []\n for i in range(xPoints):\n x, y = getTransform(i * r, j * r, amplitude, period, offset)\n\n # Clamp the edges so we don't get black undefined areas\n x = max(0, min(image.size[0] - 1, x))\n y = max(0, min(image.size[1] - 1, y))\n\n xRow.append(x)\n yRow.append(y)\n xRows.append(xRow)\n yRows.append(yRow)\n\n # Create the mesh list, with a transformation for\n # each square between points on the grid\n mesh = []\n for j in range(yPoints - 1):\n for i in range(xPoints - 1):\n mesh.append((\n # Destination rectangle\n (i * r, j * r,\n (i + 1) * r, (j + 1) * r),\n # Source quadrilateral\n (xRows[j][i], yRows[j][i],\n xRows[j + 1][i], yRows[j + 1][i],\n xRows[j + 1][i + 1], yRows[j + 1][i + 1],\n xRows[j][i + 1], yRows[j][i + 1]),\n ))\n\n img = image.transform(image.size, Image.MESH, mesh, Image.BILINEAR)\n\n # save the image to a file\n img.save(outFile, format='jpeg')\n outFile.seek(0)\n # img.save(\"captchas.jpg\") #测试用,正式系统请删除.\n src = outFile.read()\n size = len(src)\n sys.modules['ImageFile'] = ImageFile\n return {'text': text, 'src': src, 'size': size}", "def traffic_sign_detection(img_in):\n raw_img = np.copy(img_in)\n DetectedObj = {}\n\n ################################### \n ### Detecting the traffic light ###\n ################################### \n\n thresh1 = 110\n thresh2 = 60\n cannyEdges = cv2.Canny(img_in, thresh1, thresh2)\n\n circles = cv2.HoughCircles(cannyEdges,cv2.HOUGH_GRADIENT, 1, 20, param1=50,param2=26,minRadius=0,maxRadius=50)\n circles_selected = select_three(circles)\n\n if circles_selected != None:\n column = circles_selected[1][0]\n row = circles_selected[1][1]\n coordinates = (column, row)\n DetectedObj['Traffic_Sign'] = coordinates\n #cv2.circle(img_in, (circle[0], circle[1]), circle[2], (255, 0, 0), 2)\n\n\n ################################### \n ### Detecting the No_Entry sign ###\n ################################### \n\n for circle in circles[0, :]:\n column = circle[0]\n row = circle[1]\n coordinates = (column, row)\n state_pixels = img_in[int(row), int(column), :]\n if 
state_pixels[0] > 230 and state_pixels[1] > 230 and state_pixels[2] > 230 :\n DetectedObj['No_Entry'] = coordinates\n\n ################################# \n ### Detecting the Yield sign ###\n #################################\n\n coordinates = yield_sign_detection(img_in)\n if coordinates != None:\n DetectedObj['Yield'] = coordinates\n\n ################################# \n ### Detecting the Stop sign ###\n #################################\n\n coordinates = stop_sign_detection(img_in)\n if coordinates != None:\n DetectedObj['Stop'] = coordinates\n\n ################################# \n ### Detecting the Construction###\n #################################\n\n coordinates = construction_sign_detection(img_in)\n if coordinates != None:\n DetectedObj['Construction'] = coordinates\n\n ################################# \n ### Detecting the Warning_Sign###\n #################################\n\n coordinates = warning_sign_detection(img_in)\n if coordinates != None:\n DetectedObj['Warning_Sign'] = coordinates\n\n return DetectedObj\n raise NotImplementedError", "def create_captcha_image(self, chars, color, background, warp=False, **kwargs):\n\n rotate_from, rotate_to = kwargs.get('rotate_range', (-5, 5))\n left_rate, width_rate = kwargs.get('left_rate', 0.1), kwargs.get('width_rate', 0.4)\n y_low_rate, y_up_rate = kwargs.get('dy_rate_range', (-0.15, 0.15))\n\n image = Image.new('RGB', (self._width, self._height), background)\n draw = Draw(image)\n\n def _draw_character(c):\n font = random.choice(self.truefonts)\n w, h = draw.textsize(c, font=font)\n\n dx = random.randint(0, 4)\n dy = random.randint(0, 6)\n im = Image.new('RGBA', (w + dx, h + dy))\n Draw(im).text((dx, dy), c, font=font, fill=(25, 25, 25, 25))\n\n # rotate\n im = im.crop(im.getbbox())\n im = im.rotate(random.uniform(rotate_from, rotate_to), Image.BILINEAR, expand=1)\n\n # warp\n if warp:\n dx = w * random.uniform(0.1, 0.5)\n dy = h * random.uniform(0.2, 0.3)\n x1 = int(random.uniform(-dx, dx))\n y1 = int(random.uniform(-dy, dy))\n x2 = int(random.uniform(-dx, dx))\n y2 = int(random.uniform(-dy, dy))\n w2 = w + abs(x1) + abs(x2)\n h2 = h + abs(y1) + abs(y2)\n data = (\n x1, y1,\n -x1, h2 - y2,\n w2 + x2, h2 + y2,\n w2 - x2, -y1,\n )\n im = im.resize((w2, h2))\n im = im.transform((w, h), Image.QUAD, data)\n return im\n\n images = []\n for c in chars:\n images.append(_draw_character(c))\n\n text_width = sum([im.size[0] for im in images])\n\n width = max(text_width, self._width)\n image = image.resize((width, self._height))\n\n average = int(text_width / len(chars))\n rand = int(average * left_rate) #int(0.25 * average)\n offset = int(average * width_rate) #int(average * 0.1)\n\n for im in images:\n w, h = im.size\n mask = im.convert('L').point(table)\n image.paste(im, (offset, int((self._height - h) / 2 + random.uniform(y_low_rate, y_up_rate)*self._height)), mask)\n offset = offset + w + random.randint(-rand, 0)\n\n if width > self._width:\n image = image.resize((self._width, self._height))\n\n return image", "def split_dotted_general(captcha):\n image = captcha[19:46,]\n\n col_sum = np.sum(image, axis = 0)\n col_sum_list = list(col_sum)\n # Finding all the dark regions\n # beggining and end of all dark regions)\n x = 1\n i = 0\n dark_regions = []\n while i < 200:\n if col_sum_list[i] == 0:\n dark_region_beg = i\n while col_sum_list[i + x] == 0:\n x = x + 1\n if (x + i) > 199:\n break\n dark_region_end = i + x - 1\n dark_region = (dark_region_beg, dark_region_end)\n dark_regions.append(dark_region)\n i = x + i + 1\n x = 1\n else:\n i 
= i + 1\n\n # Identifying leftmost and rightmost dark regions and popping them out of the list\n left_region = dark_regions[0]\n right_region = dark_regions[-1]\n dark_regions.pop(0)\n dark_regions.pop(-1)\n\n # Sorting dark regions according to their length\n five_regions = sorted(dark_regions, key = lambda x: x[1] - x[0], reverse = True)\n\n # Building a list of GAPS (lengths of the dark regions)\n # and LINES that split such gaps in half\n gaps = []\n lines = []\n for i, region in enumerate(five_regions):\n gap = mt.ceil((region[1] - region[0]) / 2)\n if gap == 0:\n continue\n gaps.append(gap)\n lines.append(region[0] + gap)\n\n # If more than 5 gaps are identified, the problem may be due to split letters\n # Some of the troublesome letters are m, n and h\n # We will try to fix this issue by completing gaps in these letters\n if len(lines) > 5:\n\n for i in range(len(col_sum_list[:-9])):\n if col_sum_list[i:i+9] == [0, 0, 0, 0, 510, 510, 0, 3060, 3060]:\n captcha[28:30, i+1:i+3] = 255\n if col_sum_list[i:i+9] == [0, 0, 0, 0, 510, 510, 0, 2550, 2550]:\n captcha[31:33, i+1:i+3] = 255\n if col_sum_list[i:i+9] == [0, 3060, 3060, 0, 510, 510, 0, 0, 0, 0]:\n captcha[28:30, i+7:i+9] = 255\n if col_sum_list[i:i+9] == [0, 2550, 2550, 0, 510, 510, 0, 0, 0, 0]:\n captcha[31:33, i+7:i+9] = 255\n if col_sum_list[i:i+9] == [0, 4080, 4080, 0, 0, 0, 0, 510, 510]:\n captcha[31:33, i+4:i+6] = 255\n\n # Reloading image (based on modified captcha) and redefiding col_sum_list\n image = captcha[19:46, ]\n col_sum_list = list(np.sum(image, axis = 0))\n\n # Finding all the dark regions\n # beggining and end of all dark regions)\n x = 1\n i = 0\n dark_regions = []\n while i < 200:\n if col_sum_list[i] == 0:\n dark_region_beg = i\n while col_sum_list[i + x] == 0:\n x = x + 1\n if (x + i) > 199:\n break\n dark_region_end = i + x - 1\n dark_region = (dark_region_beg, dark_region_end)\n dark_regions.append(dark_region)\n i = x + i + 1\n x = 1\n else:\n i = i + 1\n\n # Identifying leftmost and rightmost dark regions and popping them out of the list\n left_region = dark_regions[0]\n right_region = dark_regions[-1]\n dark_regions.pop(0)\n dark_regions.pop(-1)\n\n # Sorting dark regions according to their length\n five_regions = sorted(dark_regions, key = lambda x: x[1] - x[0], reverse = True)\n\n # Building a list of GAPS (lengths of the dark regions)\n # and LINES that split such gaps in half\n gaps = []\n lines = []\n for i, region in enumerate(five_regions):\n gap = mt.ceil((region[1] - region[0]) / 2)\n if gap == 0:\n continue\n gaps.append(gap)\n lines.append(region[0] + gap)\n\n # If the errors persists, we move on to next captcha\n if len(lines) > 5:\n return('error')\n\n # If the algorithm finds less letters than expected (merged letters), we move on to next captcha\n if len(lines) < 5:\n return('error')\n\n # Defining rightmost and leftmost lines, appending lines list, and sorting\n left_line = left_region[1] - 2\n right_line = right_region[0] + 2\n lines.append(left_line)\n lines.append(right_line)\n lines = sorted(lines)\n\n # Finding letters x-coordinates\n letters_xcoords = []\n for i in range(len(lines)):\n if lines[i] == lines[-1]:\n break\n letter = (lines[i], lines[i + 1])\n letters_xcoords.append(letter)\n\n letters = []\n for i, letter in enumerate(letters_xcoords):\n letter_image = captcha[:60, letter[0]:letter[1]]\n letters.append(letter_image)\n\n return(letters)", "def parse(self, password):\r\n\r\n # Since keyboard combos can look like many other parsings, filter them\r\n # out first\r\n\r\n 
section_list, found_walks, keyboard_list = detect_keyboard_walk(password)\r\n\r\n self._update_counter_len_indexed(self.count_keyboard, found_walks)\r\n\r\n # Identify e-mail and web sites before doing other string parsing\r\n # this is because they can have digits + special characters\r\n\r\n found_emails, found_providers = email_detection(section_list)\r\n\r\n for email in found_emails:\r\n self.count_emails[email] += 1\r\n for provider in found_providers:\r\n self.count_email_providers[provider] += 1\r\n\r\n found_urls, found_hosts, found_prefixes = website_detection(section_list)\r\n\r\n for url in found_urls:\r\n self.count_website_urls[url] += 1\r\n for host in found_hosts:\r\n self.count_website_hosts[host] += 1\r\n for prefix in found_prefixes:\r\n self.count_website_prefixes[prefix] += 1\r\n\r\n # Identify years in the dataset. This is done before other parsing\r\n # because parsing after this may classify years as another type\r\n\r\n found_years = year_detection(section_list)\r\n\r\n for year in found_years:\r\n self.count_years[year] += 1\r\n\r\n # Need to classify context sensitive replacements before doing the\r\n # straight type classifications, (alpha, digit, etc), but want to doing\r\n # it after other types of classifations.\r\n\r\n found_context_sensitive_strings = context_sensitive_detection(section_list)\r\n\r\n for cs_string in found_context_sensitive_strings:\r\n self.count_context_sensitive[cs_string] += 1\r\n\r\n # Identify pure alpha strings in the dataset\r\n\r\n found_alpha_strings, found_mask_list = alpha_detection(\r\n section_list,\r\n self.multiword_detector\r\n )\r\n\r\n self._update_counter_len_indexed(self.count_alpha, found_alpha_strings)\r\n self._update_counter_len_indexed(self.count_alpha_masks, found_mask_list)\r\n\r\n # Identify pure digit strings in the dataset\r\n\r\n found_digit_strings = digit_detection(section_list)\r\n\r\n self._update_counter_len_indexed(self.count_digits, found_digit_strings)\r\n\r\n # Categorize everything else as other\r\n\r\n found_other_strings = other_detection(section_list)\r\n\r\n self._update_counter_len_indexed(self.count_other, found_other_strings)\r\n\r\n # Calculate the counts of the individual sections for PRINCE dictionary\r\n # creation\r\n\r\n prince_evaluation(self.count_prince, section_list)\r\n\r\n # Now after all the other parsing is done, create the base structures\r\n\r\n is_supported, base_structure = base_structure_creation(section_list)\r\n\r\n if is_supported:\r\n self.count_base_structures[base_structure] += 1\r\n\r\n self.count_raw_base_structures[base_structure] += 1\r\n\r\n return True", "def traffic_sign_detection_challenge(img_in):\n DetectedObj = {}\n coordinates = RealLifeStop(img_in)\n if coordinates != None:\n DetectedObj['Stop'] = coordinates\n\n coordinates = RealLifeYield(img_in)\n if coordinates != None:\n DetectedObj['Yield'] = coordinates\n\n coordinates = RealLifeLight(img_in)\n if coordinates != None:\n DetectedObj['Traffic_Light'] = coordinates\n\n return DetectedObj\n raise NotImplemenedError", "def get_msg(img):\n i = Image.open('%s.ste' % img)\n secret = stg.extract_msg(i)\n mac = secret.split('--:--')[0]\n print 'HMAC hex is: \\n%s\\n' % mac.encode('hex')\n data = secret.split('--:--')[1]\n print 'The hidden message is: \\n%s\\n' % data\n check_hmac(mac)\n i.show()", "def ocr_core_questions(img):\n text = pytesseract.image_to_string(\n img,\n config='--psm 12 --oem 3'\n )\n return text", "def _extract_kiss_text(self, raw_slice):\n self.text = self.frame[raw_slice + 3:]", "def 
challenge_id_to_captcha(self, challenge_id):\n try:\n secret = self._store.get(challenge_id).decode('utf-8')\n except redis.exceptions.ConnectionError as e:\n self.app.logger.error(\"Unable to connect to Redis database: '{}'.\".format(self.db_url))\n raise RuntimeError(\"Unable to connect to Redis database\")\n except redis.exceptions.ResponseError as e:\n self.app.logger.error(\"Unable to get challenge from Redis database: {}.\".format(e))\n raise RuntimeError(\"Unable to get challenge.\")\n\n if not secret:\n raise ValueError(\"No such challenge\")\n\n image_bytes = self._imageCaptcha.generate(secret)\n return image_bytes", "def f_is_first(captcha):\n \n image = captcha[19:46,]\n\n col_sum = np.sum(image, axis = 0)\n col_sum_list = list(col_sum)\n \n return(col_sum_list[28:36] == [3570, 3570, 0 , 1020, 1020, 0, 510, 510])", "def gen_captcha(text, fnt, fnt_sz, file_name, fmt='JPEG'):\n # randomly select the foreground color\n fgcolor = random.randint(0,0xffff00)\n # make the background color the opposite of fgcolor\n bgcolor = fgcolor ^ 0xffffff\n # create a font object \n font = ImageFont.truetype(fnt,fnt_sz)\n # determine dimensions of the text\n dim = font.getsize(text)\n # create a new image slightly larger that the text\n im = Image.new('RGB', (dim[0]+5,dim[1]+5), bgcolor)\n d = ImageDraw.Draw(im)\n x, y = im.size\n r = random.randint\n # draw 100 random colored boxes on the background\n for num in range(100):\n d.rectangle((r(0,x),r(0,y),r(0,x),r(0,y)),fill=r(0,0xffffff))\n # add the text to the image\n d.text((3,3), text, font=font, fill=fgcolor)\n im = im.filter(ImageFilter.EDGE_ENHANCE_MORE)\n # save the image to a file\n im.save(file_name, format=fmt)", "def testAllIntermediateValues(root):\n\n rawPlaintext = bitmap(\"vck.gif\")\n v1 = rawPlaintext.view(root, \"raw plaintext\")\n\n rawPad = randomBitmap(rawPlaintext.size())\n v2 = rawPad.view(root, \"raw pad\")\n\n rawCiphertext = XOR(rawPlaintext, rawPad)\n v3 = rawCiphertext.view(root, \"raw ciphertext\")\n\n pad = rawPad.pixelcode()\n v4 = pad.view(root, \"pixelcoded pad\")\n\n ciphertext = rawCiphertext.pixelcode()\n v5 = ciphertext.view(root, \"pixelcoded ciphertext\")\n\n decryptedResult = OR(ciphertext, pad)\n v6 = decryptedResult.view(root, \"decrypted result\")\n\n return v1, v2, v3, v4, v5, v6", "def hack_message(self):\r\n\t\t#Will not let user input useless messages that cannot be hacked.\r\n\t\twhile True:\r\n\t\t\tself.message = input(\"Please enter a message you would like to hack. --> \")\r\n\t\t\tif self.message != \"\" and len(self.message) > 4:\r\n\t\t\t\tbreak\t\t\t\r\n\t\tmax_key = len(self.message)\r\n\t\tself.i = 1\r\n\t\tpotential_hits = []\r\n\t\t#Runs through all potential keys. 
\r\n\t\tfor self.i in range(1, max_key):\r\n\t\t\tprint(f\"Trying key #{self.i}\")\t\t\t\r\n\t\t\tself.my_code = Decryptor(self.message, self.i).transfer_decrypt()\r\n\t\t\tself.hack_plausible = False\r\n\t\t\tself.verify_hack_key()\r\n\t\t\tif self.hack_plausible:\r\n\t\t\t\tpotential_hits.append(f\"Key #{self.i} yeilded {self.percent_english}% english words after decryption.\\n\" + \"\\t\" + self.my_code[:50])\r\n\t\tprint(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\")\r\n\t\tprint(\"Hacking results:\\n\")\r\n\t\tfor hit in potential_hits:\r\n\t\t\tprint(\"\\t\" + hit + \"|\\n\")", "def extract_letters(im):\r\n # Find indices of the 6 horizontal lines and 12 vertical lines of the letters\r\n hor_indices, ver_indices, _, _ = find_longest_lines(im)\r\n hor_lines = sorted(hor_indices[2:8])\r\n ver_lines = sorted(ver_indices[:12])\r\n im_edge = cv2.Canny(im, 50, 100)\r\n \r\n # Extract each letter\r\n letters = []\r\n data = np.load('data.npy')\r\n z = 0\r\n for i in range(2):\r\n for j in range(6):\r\n im_letter = im[hor_lines[i*3]: hor_lines[i*3 + 1], ver_lines[j*2] : ver_lines[j*2 + 1]]\r\n im_letter = imresize(im_letter, (15, 15), 'bicubic') > 75\r\n# im_letter = im_letter.astype(int)\r\n letter = chr(np.argmin(np.sum(np.sum(np.abs(data - im_letter), 1), 1)) + ord('a'))\r\n letters.append(letter)\r\n z += 1\r\n \r\n return letters", "def traffic_sign_detection_challenge(img_in):\n img = img_in.copy()\n clean_picture = cv2.bilateralFilter(img, 9, 75, 75)\n return traffic_sign_detection(clean_picture, light_size=(10, 30), light_offset=10)", "def predict(image_path, wrapper):\n \"\"\"\n #Don't forget to store your prediction into ImgPred\n img_prediction = ImgPred(...)\n \"\"\"\n\n #This is where all of our code will probably go. Here are the steps to success\n\n \n #Step One: Make a list which will contain the locations of every character in our source Image.\n SymPredList = []\n\n #Step Two: Go down that list we just made and use the code from PA4 in conjunction with our new Model to analyze each character. George made this part.\n #This is the find a character part of the code. 
Max and George worked it out.\n im = cv2.imread(image_path,0)\n (thresh, imbw) = cv2.threshold(im,20,255,cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n #cv2.imwrite('clapfuck.jpg', imbw)\n im3,contours,hierarchy = cv2.findContours(imbw,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n idx = 0\n for cnt in contours:\n idx += 1\n x1,y1,w,h = cv2.boundingRect(cnt)\n roi=imbw[y1:y1+h,x1:x1+w]\n\n #Step Two.1: Make a Numpy Array of all the pixels starting from the top left corner of an identified character to the bottom right corner of the identified character.\n height, width = roi.shape\n if height >= width:\n padded = cv2.copyMakeBorder(roi,0,0,(height-width)//2,(height-width)//2,cv2.BORDER_CONSTANT,value=[0,0,0])\n else:\n padded = cv2.copyMakeBorder(roi,(width-height)//2,(width-height)//2,0,0,cv2.BORDER_CONSTANT,value=[0,0,0])\n Smol = cv2.resize(padded, (28, 28))\n (thresh, evaluateMe) = cv2.threshold(Smol, 20, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n #scipy.misc.imsave(os.path.basename(file), ree)\n #Step Two.2: Feed that numpy into our PA4 image analyzer converter thing but using our new trained model\n evaluateMeMe = numpy.reshape(evaluateMe, (1, 28, 28, 1))\n prediction = tf.argmax(y_conv,1)\n final_number = prediction.eval(feed_dict={x:evaluateMeMe,y_:numpy.zeros((1,40)), keep_prob:1.0})\n #Step Two.3: Record what we think it is as the prediction field of the SymPred we are currently on\n final_guess = wrapper.label_types[int(final_number)]\n DisSymPred = SymPred(final_guess,x1,y1,x1+w,y1-h)\n SymPredList.append(DisSymPred)\n\n #Step Three: Wrap that now complete SymPred list, in an ImgPred, fill out all the fields of that ImgPred, and then return that shit.\n img_prediction = ImgPred(os.path.basename(image_path), SymPredList)\n\n #Step Four: Were Donezo\n return img_prediction", "def _parse_info(form) :\n w = 0\n n = 0\n nh = 0\n for part in re.findall(\"[A-Z]+[0-9]+\",form):\n m = re.match(\"([A-Z]+)([0-9]+)\",part)\n element = m.group(1)\n number = int(m.group(2))\n w += mass[element.capitalize()]*number\n n += number\n if element != \"H\" : nh += number\n return w,n,nh", "def _parse_info(form) :\n w = 0\n n = 0\n nh = 0\n for part in re.findall(\"[A-Z]+[0-9]+\",form):\n m = re.match(\"([A-Z]+)([0-9]+)\",part)\n element = m.group(1)\n number = int(m.group(2))\n w += mass[element.capitalize()]*number\n n += number\n if element != \"H\" : nh += number\n return w,n,nh", "def update_params(self, d):\n d['captcha_response'] = HTML(displayhtml(self.public_key))\n return super(ReCaptchaWidget, self).update_params(d)", "def process_payload(payload):\n\n # Convertion of payload string to image array for opencv\n ret, img = make_image(payload)#ret is 0 when conversion is successful or 1 when not\n result='Unable to detect'\n if ret == 0:\n cv2.imwrite('received.png', img)\n try:\n roi = extract_roi_2(img)\n \n result = detect(roi) \n \n #write_characters(roi)\n\n except:\n result = \"----------------\"\n # # When roi is extracted its a 2d array \n \n return result", "def parse_creative_serving_decision(data):\n return json.loads(base64.b64decode(data))", "def _parse_challenge(header):\n # type: (str) -> Dict[str, str]\n ret = {}\n if header.startswith(BEARER):\n challenge_params = header[len(BEARER) + 1 :]\n\n matches = re.split(AUTHENTICATION_CHALLENGE_PARAMS_PATTERN, challenge_params)\n _clean(matches)\n ret = {}\n for i in range(0, len(matches), 2):\n ret[matches[i]] = matches[i + 1]\n\n return ret", "def get_dial_value(self, img):\n cimg = img.copy()\n cimg = cv2.medianBlur(cimg,5)\n mask = 
np.zeros_like(raw_img)\n radius = int(self.radius*0.6)\n mask = cv2.circle(mask,self.center,radius,(255,0,0),-1)\n mask = cv2.circle(mask,self.center,int(radius*0.2),(0,0,255),int(radius*0.5))\n cimg = cv2.bitwise_and(cimg, mask)\n edges = cv2.Canny(cimg,50,150,apertureSize = 3)\n minLineLength = int(radius*0.3)\n maxLineGap = 5\n lines = cv2.HoughLinesP(edges,1,np.pi/180,75,minLineLength,maxLineGap)\n if lines is not None:\n for line in lines:\n for x1,y1,x2,y2 in line:\n cv2.line(cimg,(x1,y1),(x2,y2),(255,255,0),1)\n return cimg", "def OCR_Return_Verifycode(b64string):\n beforeRepaireStr = b64string if isinstance(b64string,bytes) else b64string.encode()\n \n #repair URL parse problem\n #last_img_str=beforeRepaireStr.replace(b' ',b'+');\n #use base64.urlsafe_decode replace\n \n last_img,status_code = base64strToStringIO(beforeRepaireStr)\n if len(status_code)>1:\n code=\"\"\n else:\n code = pytesseract.image_to_string(last_img, lang='eng', config='-psm 7')\n print(code,status_code)\n return code,status_code", "def parse_crowd(self, gc):\n\n try:\n stadium = str(gc.find(name='div', attrs={'class': 'stadium'}).contents[0]).strip()\n crowd = re.search(\"([0-9{1,4}]*[,]*[0-9{3}]+)\", stadium).groups()[0]\n\n if len(crowd) < 3:\n return 0\n else:\n return crowd\n except AttributeError:\n return \"\"\n except IndexError:\n return \"\"", "def process_image(self, msg):\n self.cv_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding=\"bgr8\")\n self.edge_detected = cv2.Canny(self.cv_image,self.minVal,self.maxVal)\n if cv2.__version__.startswith('3.'):\n _, self.contours,_ = cv2.findContours(self.edge_detected, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n else:\n self.contours,_ = cv2.findContours(self.edge_detected, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n self.contour_image = cv2.drawContours(self.cv_image, self.contours, -1, (0,255,0), 3)\n for i in range(len(self.contours)):\n temp = self.dp(self.contours[i], 20)\n self.res.append(len(temp))\n if len(temp) == 7:\n for i in range(0,len(temp)-1,2):\n cv2.line(self.contour_image, (temp[i][0],temp[i][1]),(temp[i+1][0], temp[i+1][1]), (0,0,255), 5)\n if len(temp) == 5:\n for i in range(0,len(temp)-1,2):\n cv2.line(self.contour_image, (temp[i][0],temp[i][1]),(temp[i+1][0], temp[i+1][1]), (255,0,0), 5)", "def submit(request):\n if request.POST:\n form = CaptchaForm(request.POST, request.FILES)\n if form.is_valid():\n image = request.FILES['singleImage']\n extension = image.name.split('.')[1]\n hashname = random.getrandbits(128)\n with open(os.path.join(settings.STATIC_ROOT, \"tmp/%s.%s\" % (hashname, extension)), \"w+\") as imagePath:\n imagePath.write(image.read())\n\n ctx = RequestContext(request, {\"hash\":hashname, \"extension\":extension})\n template = loader.get_template(\"wainz/submission_details.html\")\n\n return HttpResponse(template.render(ctx))\n else:\n form = CaptchaForm()\n\n return render_to_response(\"wainz/submit.html\", dict(form=form), context_instance = RequestContext(request))", "def generate_image(self, chars, with_dots=True, with_curve=True, warp=None):\n warp = warp or True\n background = random_color(238, 255)\n color = random_color(0, 200, random.randint(220, 255))\n im = self.create_captcha_image(chars, color, background, warp=warp)\n if with_dots:\n self.create_noise_dots(im, color)\n if with_curve:\n self.create_noise_curve(im, color)\n im = im.filter(ImageFilter.SMOOTH)\n return im", "def post_image_task(self, file_path):\n url = 'http://2captcha.com/in.php'\n input_file = {'file': open(file_path, 'rb')}\n data = 
{'key': self.api_key, 'method': 'post', 'json': 1}\n response = self.session.post(url, files=input_file, data=data)\n id_answer = self.handle_id_answer(response.text)\n finished = False\n for _ in range(20): # For making up to 120 seconds of waits\n if 'CAPCHA_NOT_READY' not in response.text:\n finished = True\n break\n # Time Requested by the web page\n sleep(6)\n response = self.session.post(url, files=input_file, data=data)\n id_answer = self.handle_id_answer(response.text)\n\n if not finished:\n return False\n\n return id_answer", "def get_challenge(email, sid):\n params = {'email_address': email, 'assignment_part_sid': sid, 'response_encoding': 'delim'}\n\n challenge_url = '%s%schallenge' % (protocol, base_url)\n data = urllib.parse.urlencode(params).encode('utf-8')\n req = urllib.request.Request(challenge_url, data)\n resp = urllib.request.urlopen(req)\n text = resp.readall().decode('utf-8').strip().split('|')\n\n if len(text) != 9:\n print(' !! %s' % '|'.join(text))\n sys.exit(1)\n \n return tuple(text[x] for x in [2,4,6,8])", "def parse(raw_string, validate): \n # Go field by field.\n passport = Passport()\n\n if not validate:\n # Non-validation mode.\n passport.byr = Passport._find_field_value(raw_string, \"byr\")\n passport.iyr = Passport._find_field_value(raw_string, \"iyr\")\n passport.eyr = Passport._find_field_value(raw_string, \"eyr\")\n passport.hgt = Passport._find_field_value(raw_string, \"hgt\")\n passport.hcl = Passport._find_field_value(raw_string, \"hcl\")\n passport.ecl = Passport._find_field_value(raw_string, \"ecl\")\n passport.pid = Passport._find_field_value(raw_string, \"pid\")\n passport.cid = Passport._find_field_value(raw_string, \"cid\")\n return passport\n\n # Validation mode.\n # byr\n byr_value = Passport._find_field_value(raw_string, \"byr\")\n if len(byr_value) != 4:\n byr_value = \"\"\n try:\n byr_value = int(byr_value)\n if byr_value < 1920 or byr_value > 2002:\n byr_value = \"\"\n except Exception:\n byr_value = \"\"\n passport.byr = byr_value\n\n # iyr\n iyr_value = Passport._find_field_value(raw_string, \"iyr\")\n if len(iyr_value) != 4:\n iyr_value = \"\"\n try:\n iyr_value = int(iyr_value)\n if iyr_value < 2010 or iyr_value > 2020:\n iyr_value = \"\"\n except Exception:\n iyr_value = \"\"\n passport.iyr = iyr_value\n \n # eyr\n eyr_value = Passport._find_field_value(raw_string, \"eyr\")\n if len(eyr_value) != 4:\n eyr_value = \"\"\n try:\n eyr_value = int(eyr_value)\n if eyr_value < 2020 or eyr_value > 2030:\n eyr_value = \"\"\n except Exception:\n eyr_value = \"\"\n passport.eyr = eyr_value\n \n # hgt\n hgt_value = Passport._find_field_value(raw_string, \"hgt\")\n height_number = hgt_value[0:-2]\n height_units = hgt_value[-2:]\n try:\n height_number = int(height_number)\n if height_units == \"cm\":\n if height_number < 150 or height_number > 193:\n hgt_value = \"\"\n elif height_units == \"in\":\n if height_number < 59 or height_number > 76:\n hgt_value = \"\"\n else:\n hgt_value = \"\"\n except Exception:\n hgt_value = \"\"\n passport.hgt = hgt_value\n \n # hcl\n hcl_value = Passport._find_field_value(raw_string, \"hcl\")\n hcl_re_string = r\"#[0-9a-f]{6}\"\n hcl_re = re.compile(hcl_re_string)\n instances = hcl_re.findall(hcl_value)\n if len(instances) != 1:\n hcl_value = \"\"\n passport.hcl = hcl_value\n \n # ecl\n ecl_value = Passport._find_field_value(raw_string, \"ecl\")\n eye_colors = {\"amb\", \"blu\", \"brn\", \"gry\", \"grn\", \"hzl\", \"oth\"}\n if ecl_value not in eye_colors:\n ecl_value = \"\"\n passport.ecl = ecl_value\n \n # pid\n 
pid_value = Passport._find_field_value(raw_string, \"pid\")\n if len(pid_value) != 9:\n pid_value = \"\"\n try:\n int(pid_value)\n except Exception:\n pid_value = \"\"\n passport.pid = pid_value\n\n # cid is always okay\n passport.cid = Passport._find_field_value(raw_string, \"cid\")\n\n return passport", "def alchallenge_packet(self):\n\n packet = \"\\x00\" # Opcode (Auth Logon Challenge)\n packet += \"\\x08\" # (Error) da wireshark \n packet += chr(30 + len(self.I))\n packet += \"\\x00\\x57\\x6f\\x57\\x00\" # Game name: <WoW>\n packet += \"\\x03\\x03\\x05\" # Version[1,2,3]: <335>\n packet += \"\\x34\\x30\" # Build: <12340>\n packet += \"\\x36\\x38\\x78\\x00\" # Platform: <x86>\n packet += \"\\x6e\\x69\\x57\\x00\" # O.S. : <Win>\n packet += \"\\x53\\x55\\x6e\\x65\" # Country: <enUS>\n packet += \"\\x3c\\x00\\x00\\x00\" # Timezone bias: <60>\n packet += \"\\xc0\\xa8\\x01\\x02\" # IP address: <192.168.1.2>\n packet += chr(len(self.I)) # SRP I length\n packet += self.I # SRP I value\n return packet", "def post(self):\n code = request.form.get('captcha-code')\n username = request.form.get('username')\n password = request.form.get('password')\n # creating dictionary for following logic\n ctx = {'captcha': True, 'username': username}\n\n # captcha inserted/not inserted\n if code:\n logger.info(f'User {username} logged in, step 2')\n # FIXME Remove False after function check_code is created\n # captcha valid/invalid\n if dbhandler.check_code(ctx['username'], code):\n logger.info(f'User {username} successfully logged in')\n set_current_user(username)\n return redirect(url_for('index'), 200)\n else:\n logger.warning(f'User {username} posted wrong captcha')\n return render_template(self.template_name, error='Incorrect captcha code', **ctx)\n\n # user valid/non valid\n user = dbhandler.search_user(username, password)\n if user:\n logger.info(f'User {username} logged in, step 1')\n return render_template(self.template_name, **ctx)\n\n logger.warning(f'User {username} posted wrong password')\n return render_template(self.template_name, error='Incorrect username or password')", "def setUp(self):\n \n ### Secret messages:\n text1 = r\"\"\"\n665555432 64 o42o4 o__ __o/4__o__32 __o__564<|\\4/|>32 /v3 |4/>2\\32 />2\\5 64/ \\`o3o'/ \\32/>3 / \\32 \\o42\\o5364\\o/ v\\2/v \\o/32\\32\\o/4v\\42v\\52 64 |2 <\\/>2 |4o32|42<\\42<\\5264/ \\4/ \\32 <\\__2/ \\3_\\o__</3 _\\o__</5264\\o/4\\o/5554 64 |42|32o54 o54 64/ \\4/ \\3<|>532_<|>_532 6542/ \\42\\o__ __o55654o/2 \\o4 |3 |>3 o54 6532 <|__ __|>32 / \\2 / \\3<|>546532 /32 \\32 \\o/2 \\o/3/ \\54653 o/4 \\o32|3 |3 \\o/54653/v42 v\\3/ \\2 / \\3 |54 652 />43 <\\5/ \\5465555432 65555432 6553o4 o5562\\o__ __o__ __o52 <|>32_<|>_52\\o__ __o362 |3 |3 |>32o__ __o/3< >5 o__ __o32|3 |>2 62/ \\2 / \\2 / \\3 /v3 |32|4 o32 /v3 v\\3/ \\2 / \\2 62\\o/2 \\o/2 \\o/3/>3 / \\3 o__/_3<|>3 />32 <\\2 \\o/2 \\o/2 62 |3 |3 |3 \\32\\o/3 |4/ \\3 \\4 /3|3 |362/ \\2 / \\2 / \\3 o32|32|4\\o/32o32 o3/ \\2 / \\2 6532<\\__2/ \\3 o4 |32 <\\__ __/>56553<\\__3 / \\5432 65555432 6655Acrobatic font by Randy Ransom via Figlet6552 '\n\"\"\"\n text2 = r\"\"\"\n62@@@@@@@3@@@@@@3@@@@@@@2@@@2@@@2 @@@@@@2 62@@@@@@@@2@@@@@@@@2@@@@@@@@2@@@2@@@2@@@@@@@2 62@@!2@@@2@@!2@@@2!@@32 @@!2!@@2!@@32 62!@!2@!@2!@!2@!@2!@!32 !@!2@!!2!@!32 62@!@!!@!2 @!@2!@!2!@!32 @!@@!@!2 !!@@!!362!!@!@!3!@!2!!!2!!!32 !!@!!!3 !!@!!!2 62!!: :!!2 !!:2!!!2:!!32 !!: :!!4!:!262:!:2!:!2:!:2!:!2:!:32 :!:2!:!32!:!2 62::2 :::2::::: ::2 ::: :::2 ::2:::2:::: ::2 62 :2 : :2 : :2:3:: :: :2 :2 :::2:: : :26 655 Poison font by Vinney Thai via Figlet6 \n 
\"\"\"\n\n def decode(str):\n \"\"\"This just decodes the above strings into something \n meaningful.\"\"\"\n s6 = re.sub('6','\\n',str)\n s5 = re.sub('5','44',s6)\n s4 = re.sub('4','33',s5)\n s3 = re.sub('3','22',s4)\n return re.sub('2',' ',s3)\n \n self.item = CurlTestBlobEntry(mytext=decode(text1), mytext2=decode(text2))\n self.item.put();", "def ocr_core_expressvote(img):\n text = pytesseract.image_to_string(\n img,\n config='--psm 6 --oem 3'\n )\n return text", "def substantiate():", "def recognize():\n return 0", "def decode(r):\n ## Check length\n if len(r) != 60: return False\n ## Check cookie\n if r[0x0:0x2] != b'\\xef\\xfe': return False\n t = r[2]\n mac = \"%02x:%02x:%02x:%02x:%02x:%02x\" % struct.unpack(\"BBBBBB\",r[3:9])\n gateware = \"{0}.{1}\".format(r[0x09],r[0x15])\n radio_id = r[0x0a]\n temp = r[0x0b]\n use_eeprom_ip = (temp & 0x80) != 0\n use_eeprom_mac = (temp & 0x40) != 0\n favor_dhcp = (temp & 0x20) != 0\n eeprom_ip = \"%d:%d:%d:%d\" % struct.unpack(\"BBBB\",r[0x0d:0x11])\n eeprom_mac = \"%02x:%02x:%02x:%02x:%02x:%02x\" % struct.unpack(\"BBBBBB\",r[0x03:0x07]+r[0x11:0x13])\n receivers = r[0x13]\n temp = r[0x14]\n board_id = temp & 0x3f\n wideband_type = 0x03 & (temp >> 6)\n response_data = struct.unpack('!L',r[0x17:0x1b])[0]\n temp = r[0x1b]\n ext_cw_key = (temp & 0x80) != 0\n tx_on = (temp & 0x40) != 0\n adc_clip_cnt = temp & 0x03\n temperature = struct.unpack('!H',r[0x1c:0x1e])[0]\n # For best accuracy, 3.26 should be a user's measured 3.3V supply voltage.\n temperature = (3.26 * (temperature/4096.0) - 0.5)/0.01\n # TODO: Add proper power compoutation, maybe table interpolation like Quisk\n fwd_pwr = struct.unpack('!H',r[0x1e:0x20])[0]\n rev_pwr = struct.unpack('!H',r[0x20:0x22])[0]\n bias = struct.unpack('!H',r[0x22:0x24])[0]\n bias = ((3.26 * (bias/4096.0))/50.0)/0.04\n temp = r[0x24]\n txfifo_recovery = (temp & 0x80) != 0\n txfifo_msbs = (temp & 0x7f)\n return Response(t,mac,gateware,radio_id,use_eeprom_ip,use_eeprom_mac,favor_dhcp,eeprom_ip,eeprom_mac,\n receivers,board_id,wideband_type,response_data,ext_cw_key,tx_on,adc_clip_cnt,temperature,\n fwd_pwr,rev_pwr,bias,txfifo_recovery,txfifo_msbs)", "def captcha_protected(self):\n return settings.RECAPTCHA_ENABLE", "def captcha_protected(self):\n return settings.RECAPTCHA_ENABLE", "def captcha_protected(self):\n return settings.RECAPTCHA_ENABLE", "def find_letters(line_image):\r\n\r\n\tif line_image.shape[0] < 40:\r\n\t\tline_image = cv2.resize(line_image, (line_image.shape[1] * 2, line_image.shape[0] * 2))\r\n\r\n\t#binary\r\n\tret,thresh = cv2.threshold(line_image, 109, 255, cv2.THRESH_BINARY_INV)\r\n\r\n\tif cv2.__version__.startswith('3.'):\r\n\t\tim2, ctrs, hier = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n\telse:\r\n\t\t(ctrs, __) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n\t#sort contours\r\n\tsorted_ctrs = sorted(ctrs, key=lambda ctr: cv2.boundingRect(ctr)[0], reverse=True)\r\n\r\n\t#creating objects - so we coult hold a few arguments that connected together in the same variable\r\n\r\n\tclass contur:\r\n\t\tdef __init__(self, x, y, w, h):\r\n\t\t\tself.x_start = x\r\n\t\t\tself.y_start = y\r\n\t\t\tself.x_end = x + w\r\n\t\t\tself.y_end = y + h\r\n\r\n\tletters_images = list()\r\n\tnew_ctr = list()\r\n\r\n\tfor j, ctr in enumerate(sorted_ctrs):\r\n\t\tx, y, w, h = cv2.boundingRect(ctr)\r\n\t\tc = contur(x, y, w, h)\r\n\t\tnew_ctr.append(c)\r\n\r\n\tlength = len(new_ctr)\r\n\r\n\ti = 0\r\n\twhile i < length:\r\n\t\tx, y, w, h = 
cv2.boundingRect(sorted_ctrs[i])\r\n\r\n\t\tif h > 3:\r\n\t\t\tcanvas = np.ones_like(line_image)\r\n\t\t\tcanvas.fill(255)\r\n\t\t\tcv2.drawContours(canvas, sorted_ctrs, i, (0, 0, 0), 3)\r\n\r\n\t\t\tif i < length - 1 and new_ctr[i].x_start >= new_ctr[i + 1].x_start and new_ctr[i].x_end <= new_ctr[i + 1].x_end:\r\n\t\t\t\tY_end_bigger = max(new_ctr[i].y_end, new_ctr[i + 1].y_end)\r\n\t\t\t\tcv2.drawContours(canvas, sorted_ctrs, i + 1, (0, 0, 0), 3)\r\n\r\n\t\t\t\tif union_left_ctr(new_ctr[i], new_ctr[i+1], canvas) == 0:\r\n\t\t\t\t\troi = canvas[y:y + h, x:x + w]\r\n\t\t\t\t\troiriginal = line_image[y:y + h, x:x + w]\r\n\t\t\t\telse:\r\n\t\t\t\t\troi = canvas[new_ctr[i + 1].y_start:Y_end_bigger, new_ctr[i + 1].x_start:new_ctr[i + 1].x_end]\r\n\t\t\t\t\troiriginal = line_image[new_ctr[i + 1].y_start:Y_end_bigger, new_ctr[i + 1].x_start:new_ctr[i + 1].x_end]\r\n\t\t\t\t\ti += 1\r\n\t\t\telse:\r\n\t\t\t\troi = canvas[y:y + h, x:x + w]\r\n\t\t\t\troiriginal = line_image[y:y + h, x:x + w]\r\n\r\n\t\t\tletter = np.pad(roiriginal, pad_width=10, mode='constant', constant_values=255)\r\n\r\n\t\t\tletters_images.append(letter)\r\n\t\ti += 1\r\n\treturn letters_images", "def extract_text(para_tokenized):\n\n for _ in range(10):\n text = random.choice(para_tokenized)\n if text and 60 < len(text) < 210:\n return text\n\n return None", "def profanityCheck(text):\n return predict_prob([text])[0]", "def create_challenge():\n\treturn os.urandom(12)", "def process(self, image):", "def get_num_cams(self, data):\n return self.parse_data(data)[1]", "def SecondPart():\n return passwordChecker(data)", "def ByPass_capcha(headers=dict,url=str):\n data = []\n regex = re.compile(f\"^(/)\")\n spm = \"dtitle\"\n\n cookies,datcookies = get_cookies(url,headers)\n\n html = requests.get(url,headers=headers,\n cookies=cookies).text\n soup = BeautifulSoup(html, \"html.parser\")\n try:\n for div in soup.findAll('div',{'data-name':'m_pos'}):\n for a in div.find_all('a',href=regex):\n if a.attrs['data-spm'] == spm:\n data.append({'title': a.text,'url': a.attrs['href']})\n divlast = soup.find_all('div',{'class':'label-text_VrGXs'})\n page = int(divlast[-1].text)\n htmllastpage = get_html(page,cookies)\n\n return data,htmllastpage,datcookies\n \n except Exception as e:\n return [\"Capcha Found\"],\"Capcha Found !!!\",[\"Capcha Found\"]", "def __init__(self, blank_data, valid=False):\n self.step = '0'\n self.raw_text = blank_data['raw_text'].lower() # 原始识别结果,包含删除符号,标点符号\n self.text = blank_data['text'].lower() # 干净识别结果,只有字符与空格\n self.reference = blank_data['reference'].lower() # 标准答案\n self.prob = blank_data['prob'] # 对应原始识别结果的概率list\n self.prob_avg = blank_data['prob_val'] # 概率list的平均值\n self.url = blank_data['url'] # 填空题图片地址 \n\n if valid:\n self.marked = blank_data['marked'] # 是否被人工check过 \n self.human_text = blank_data['manuallyResult'].lower() # 运营人员标注结果\n self.score = blank_data['score'] # 该题的得分值\n \n self.ref_size = len(self.reference)\n self.ans_size = len(self.text)\n self.raw_size = len(self.raw_text)\n self.text_size = len(self.text)\n self.ref_word_size = len(self.reference.split(' '))\n self.ans_word_size = len(self.text.split(' '))\n\n # 针对多个答案的填空题\n LIST_ANS = self.reference.split('@@')\n LIST_SIZE = [len(ans) for ans in LIST_ANS if len(ans)<2]\n LIST_NB = [ans for ans in LIST_ANS if ans.isdigit()]\n\n self.FLAG_SHORT = True if LIST_SIZE!=[] else False\n self.FLAG_DIGIT = True if LIST_NB!=[] else False\n self.FLAG_MULTI = True if '@@' in self.reference else False\n\n # generate pure text and reference\n 
LIST_toclean = [' ', '.', '-', '?', '!', ',', ':']\n pure_text = self.text\n pure_ref = self.reference\n for item in LIST_toclean:\n pure_text = pure_text.replace(item, '')\n pure_ref = pure_ref.replace(item, '')\n self.pure_text = pure_text\n self.pure_ref = pure_ref\n self.pure_text_size = len(self.pure_text)\n self.pure_ref_size = len(self.pure_ref)", "def encode(img):\r\n msg=getInput(img); #Get User input\r\n ints=stringToInts(msg); #Convert all characters in the input to their ascii values\r\n ImageUtilities.setPixelAlphasFromIntsRandom(img,ints); #For every ascii value set a different pixel's alpha value to that ascii value.\r\n return img;", "def find_tape():\n\n _, frame = CAP.read()\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(hsv, color_lower, color_upper)\n _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE,\n cv2.CHAIN_APPROX_SIMPLE)\n\n # Find all valid pair rects, and reutrn if none found\n pair_rects = get_pair_rects(contours)\n if len(pair_rects) == 0:\n return\n\n # If found, continue on and post results\n center = closest_center(pair_rects)\n\n to_send = '{}:{}\\n'.format(\n round(time.time(), 3), round(degrees(horizontal_angle(center[0])), 3))\n print(to_send)\n s.send(bytearray(to_send, 'utf-8'))" ]
[ "0.62265563", "0.6219252", "0.61916035", "0.6096624", "0.60874146", "0.59667766", "0.59021354", "0.58705884", "0.5825633", "0.5809599", "0.57755667", "0.5746426", "0.5594208", "0.5538595", "0.55344874", "0.55010104", "0.5449904", "0.53967845", "0.53909343", "0.5288578", "0.5287296", "0.5286236", "0.52829856", "0.5254987", "0.5246941", "0.5169091", "0.5159369", "0.5097183", "0.5085489", "0.5080703", "0.5059381", "0.50465286", "0.50334597", "0.5029186", "0.5012406", "0.4979068", "0.49787685", "0.49767226", "0.49684954", "0.49333897", "0.4922056", "0.48874193", "0.4862065", "0.48495", "0.48487684", "0.48060974", "0.4795562", "0.47888258", "0.4786766", "0.47765896", "0.47756222", "0.4737967", "0.47355175", "0.47191948", "0.4702975", "0.47028738", "0.47021425", "0.47007963", "0.46818143", "0.467556", "0.4672708", "0.46562862", "0.46361628", "0.4619324", "0.46060765", "0.46060765", "0.45778713", "0.45674178", "0.4541432", "0.45405343", "0.45371306", "0.45356938", "0.45256665", "0.45231855", "0.4511048", "0.45107937", "0.45086372", "0.45084205", "0.45015246", "0.44975498", "0.4485891", "0.44793466", "0.44735143", "0.44730705", "0.44713017", "0.44680583", "0.4460131", "0.4460131", "0.4460131", "0.4458478", "0.4447531", "0.44442475", "0.44277498", "0.44221336", "0.44129446", "0.44126955", "0.4412002", "0.44089207", "0.43948773", "0.43906727" ]
0.67830896
0
The set of arguments for constructing an Environment resource.
def __init__(__self__, *, application_name: pulumi.Input[str], cname_prefix: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, environment_name: Optional[pulumi.Input[str]] = None, operations_role: Optional[pulumi.Input[str]] = None, option_settings: Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentOptionSettingArgs']]]] = None, platform_arn: Optional[pulumi.Input[str]] = None, solution_stack_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentTagArgs']]]] = None, template_name: Optional[pulumi.Input[str]] = None, tier: Optional[pulumi.Input['EnvironmentTierArgs']] = None, version_label: Optional[pulumi.Input[str]] = None): pulumi.set(__self__, "application_name", application_name) if cname_prefix is not None: pulumi.set(__self__, "cname_prefix", cname_prefix) if description is not None: pulumi.set(__self__, "description", description) if environment_name is not None: pulumi.set(__self__, "environment_name", environment_name) if operations_role is not None: pulumi.set(__self__, "operations_role", operations_role) if option_settings is not None: pulumi.set(__self__, "option_settings", option_settings) if platform_arn is not None: pulumi.set(__self__, "platform_arn", platform_arn) if solution_stack_name is not None: pulumi.set(__self__, "solution_stack_name", solution_stack_name) if tags is not None: pulumi.set(__self__, "tags", tags) if template_name is not None: pulumi.set(__self__, "template_name", template_name) if tier is not None: pulumi.set(__self__, "tier", tier) if version_label is not None: pulumi.set(__self__, "version_label", version_label)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cookiecutter_args(self) -> dict[str, str]:\n local_args = {\n \"add_golden\": \"y\" if self.golden_tests else \"n\",\n \"copyright_holder\": self.copyright_holder,\n \"copyright_year\": (\n self.today.strftime(\"%Y\")\n if not self.copyright_year\n else self.copyright_year\n ),\n \"github_owner\": self.github_owner,\n \"name\": self.name,\n \"slug\": self.slug,\n # The template expects the test cases in a single string separated by\n # spaces.\n \"test_cases\": \" \".join(self.test_cases),\n }\n cruft_json = self.target_dir / \".cruft.json\"\n if cruft_json.is_file():\n with open(cruft_json, \"r\", encoding=\"utf-8\") as f:\n cruft_json_data = json.load(f)\n args = cruft_json_data[\"context\"][\"cookiecutter\"]\n for k, v in local_args.items():\n args[k] = v\n else:\n args = local_args\n\n return args", "def _set_arguments(self):\n cert_location = f\"dependencies{sep}certificates{sep}localuser.crt\"\n key_location = f\"dependencies{sep}certificates{sep}localuser.key\"\n assert Path(cert_location).exists(), (\n f\"The certificate isn't \"\n f\"present at location {Path(cert_location).absolute()}\"\n )\n assert Path(key_location).exists(), (\n f\"The certificate key isn't \"\n f\"present at location {Path(key_location).absolute()}\"\n )\n self._arguments = [\n (\n \"test-certificate-verify\",\n [\"-k\", key_location, \"-c\", cert_location],\n ),\n (\n \"test-sig-algs\",\n [],\n ),\n (\n \"test-clienthello-md5\",\n [],\n ),\n (\n \"test-tls13-pkcs-signature\",\n [],\n ),\n ]", "def __init__(__self__,\n resource_name: str,\n args: EnvironmentArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def _build_arguments(self):\n # TODO: comeback to allow test path override. maybe?\n # self._parser.add_argument(\n # '--test-path',\n # type=utils.validate_path,\n # required=False,\n # help=('Path th projects test Dockerfile. Dockerfile should be in the root of the test directory.')\n # )\n self._parser.add_argument(\n '--configs',\n type=bool,\n required=False,\n default=False,\n help=\"Would you like to inject configuration files?\"\n )", "def create_environment(args):\n env.username = args.user\n env.password = args.password\n env.service_url = args.service_url\n env.quiet = args.quiet\n env.verbose = args.verbose\n env.manifest = args.manifest\n env.debug = args.debug\n env.always_confirm = args.yes\n env.args = args\n env.api = ravello.RavelloClient(env.username, env.password, env.service_url)", "def _setup_arguments(self):\n\n self._parser.add_argument(\"-a\", \"--area-interest\",\n help=\"Area of interest to process, \"\n \"shapefile path\", required=True)\n # FUTURE VERSIONS\n # self._parser.add_argument(\"-s\", \"--srtm-dem\",\n # help=\"Path to SRTM DEM file. Zip format\",\n # required=False)\n # self._parser.add_argument(\"-y\", \"--hsheds-dem\",\n # help=\"Path to HSHEDS DEM file. Zip format\",\n # required=False)\n # self._parser.add_argument(\"-g\", \"--groves-file\",\n # help=\"Path to groves classification file. 
\"\n # \"Zip format\",\n # required=False)", "def wrapper_environment(args):\n\n return {\n ENVIRONMENT_KEY: json.dumps({\n 'verbose': args.verbose,\n 'cc': shlex.split(args.cc),\n 'cxx': shlex.split(args.cxx)\n })\n }", "def get_arguments_configuration(argv):\n arguments_config = {\n 'execution_arguments': str(argv),\n 'execution_config': {\n 'component': ExecutionConfig,\n 'component_args': ['env_layer', 'composite_logger'],\n 'component_kwargs': {\n 'execution_parameters': str(argv)\n }\n }\n }\n return arguments_config", "def initialise(self, args, environ):", "def fill_args(cls, toolchain, parser):\n pass # pass must be overloaded (if required)", "def define_args(self, env, *args):\n if (len(self.params) != len(args) and not self.rest_name) or len(self.params) > len(args):\n raise SnekEvaluationError(\"wrong number of arguments (expected {}{}, got {})\".format(len(self.params), \"+\" if self.rest_name else \"\", len(args)))\n for (i, param) in enumerate(self.params):\n env.define(param, args[i])\n if self.rest_name:\n env.define(self.rest_name, list_snek(*args[len(self.params):]))", "def get_arguments():\n\n # Creates the ArgumentParser\n parser = argparse.ArgumentParser(usage='Creates an ensemble of classifiers based on majority voting.')\n\n # Adds a dataset argument with pre-defined choices\n parser.add_argument('dataset', help='Dataset identifier', choices=['RSDataset', 'RSSCN7', 'UCMerced_LandUse'])\n\n return parser.parse_args()", "def universal_args(self):\n args = list(self.BASIC_ARGS)\n # Set ATF to be the bios\n args += [\"-bios\", \"%s/bl1.bin\" % self.config.atf]\n\n if self.config.linux:\n args += [\n \"-kernel\",\n \"%s/arch/arm64/boot/Image\" % self.config.linux\n ]\n args += [\"-append\", self.LINUX_ARGS]\n\n if self.config.android:\n args += self.android_drives_args()\n\n return args", "def setup_args(self):\n self.parser = argparse.ArgumentParser()\n self.group = self.parser.add_mutually_exclusive_group()\n\n self.group.add_argument('-a', '--add', help='Adds a new task to the task list', action='store_true')\n self.group.add_argument('-r', '--remove', help='Removes a task from the task list', action='store_true')\n self.group.add_argument('-f', '--finish', help='Sets a task to be finished', action='store_true')\n self.group.add_argument('-u', '--unfinish', help='Sets a task to be not finished', action='store_true')\n self.group.add_argument('-c', '--change', help='Updates an existing task', action='store_true')\n self.group.add_argument('-v', '--view', help='View your current task list', action='store_true')\n\n return self.parser", "def get_cli_arguments(self):\n pass", "def create_arg_config(environment, region, template, parameters):\r\n raw_config = {\r\n 'Environment': environment,\r\n 'Region': region\r\n }\r\n if template:\r\n raw_config['Template'] = template\r\n if parameters:\r\n raw_config['Parameters'] = dict(parameters)\r\n return Config(raw_config)", "def read_arguments(argv):\n\tif argv[0] in ('1', '2'):\n\t\tconos_config['endpoint'] = endpoint[argv[0]]\n\telse:\n\t\tusage()\n\n\tif argv[1] in ('dev', 'test', 'int', 'prod'):\n\t\tconos_config['environment'] = argv[1]\n\t\tconos_config['sts_url'] = eval(argv[1] + '_sts_url')\n\t\tconos_config['aicuu_url'] = eval(argv[1] + '_aicuu_url')\n\telse:\n\t\tusage()\n\n\tif len(argv) == 6:\n\t\tconos_config['number_threads'] = '1'\n\telse:\n\t\tif argv[6] in ('1', '2', '3', '4', '5', '6', '7', '8'):\n\t\t\tconos_config['number_threads'] = argv[6]\n\t\telse:\n\t\t\tusage()\n\n\tconos_config['client_id'] = 
argv[2]\n\tconos_config['client_secret'] = argv[3]\n\tconos_config['input_file'] = argv[4]\n\tconos_config['output_file'] = argv[5]", "def base_arguments(self):\n raise NotImplementedError()", "def _get_args():\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('--env', '-e',\n type=str,\n default='Zelda1-v0',\n choices=['Zelda1-v0'],\n help='The environment to play'\n )\n parser.add_argument('--mode', '-m',\n type=str,\n default='human',\n choices=['human', 'random'],\n help='The execution mode for the environment.'\n )\n parser.add_argument('--steps', '-s',\n type=int,\n default=500,\n help='The number of random steps to take.',\n )\n return parser.parse_args()", "def add_arguments(parser):\n parser.add_argument('-e', '--environment', help='Environment name', required=True)\n parser.add_argument('-w', '--dont-wait', help='Skip waiting for the init to finish', action='store_true')\n parser.add_argument('-l', '--version-label', help='Version label', required=False)", "def setup_args(cls, parser):\n pass", "def add_env_args(parser):\n # sawyer\n parser.add_argument(\n \"--reward_type\",\n type=str,\n default=\"dense\",\n choices=[\"dense\", \"sparse\"],\n help=\"reward type\",\n )\n parser.add_argument(\n \"--distance_threshold\",\n type=float,\n default=0.06,\n help=\"distance threshold for termination\",\n )\n parser.add_argument(\n \"--max_episode_steps\",\n type=int,\n default=70,\n help=\"maximum timesteps in an episode\",\n )\n parser.add_argument(\n \"--camera_name\",\n type=str,\n default=\"visview\",\n help=\"camera name in an environment\",\n )\n\n # observations\n parser.add_argument(\n \"--frame_skip\", type=int, default=1, help=\"Numer of skip frames\"\n )\n parser.add_argument(\n \"--action_repeat\", type=int, default=1, help=\"number of action repeats\"\n )\n parser.add_argument(\n \"--ctrl_reward_coef\", type=float, default=0, help=\"control reward coefficient\"\n )\n\n parser.add_argument(\n \"--kp\", type=float, default=40.0, help=\"p term for a PID controller\"\n ) # 150.)\n parser.add_argument(\n \"--kd\", type=float, default=8.0, help=\"d term for a PID controller\"\n ) # 20.)\n parser.add_argument(\n \"--ki\", type=float, default=0.0, help=\"i term for a PID controller\"\n )\n parser.add_argument(\n \"--frame_dt\", type=float, default=0.15, help=\"delta t between each frame\"\n ) # 0.1)\n parser.add_argument(\n \"--use_robot_indicator\",\n type=eval,\n default=False,\n help=\"enable visualization of robot indicator for motion planner\",\n )\n parser.add_argument(\n \"--use_target_robot_indicator\",\n type=eval,\n default=False,\n help=\"enable visualization of robot indicator for target position of motion planner\",\n )\n parser.add_argument(\n \"--success_reward\", type=float, default=150.0, help=\"completion reward\"\n )\n parser.add_argument(\n \"--contact_threshold\",\n type=float,\n default=-0.002,\n help=\"depth thredhold for contact\",\n )\n parser.add_argument(\n \"--joint_margin\", type=float, default=0.001, help=\"marin of each joint\"\n )\n parser.add_argument(\"--task_level\", type=str, default=\"easy\")\n parser.add_argument(\n \"--step_size\",\n type=float,\n default=0.02,\n help=\"step size for invalid target handling\",\n )\n # puck\n parser.add_argument(\"--puck_friction\", type=float, default=2.0)\n parser.add_argument(\"--puck_mass\", type=float, default=0.01)\n parser.add_argument(\"--source_env_puck_friction\", type=float, default=2.0)\n parser.add_argument(\"--source_env_puck_mass\", type=float, default=0.01)\n 
parser.add_argument(\"--target_env_puck_friction\", type=float, default=2.0)\n parser.add_argument(\"--target_env_puck_mass\", type=float, default=0.01)\n\n parser.add_argument(\"--env_ob_source\", type=str2bool, default=False)\n parser.add_argument(\"--end_effector\", type=str2bool, default=True)\n parser.add_argument(\"--ik_target\", type=str, default=\"grip_site\")\n parser.add_argument(\n \"--action_range\", type=float, default=0.1, help=\"range of radian\"\n )\n parser.add_argument(\"--dr\", type=str2bool, default=False)\n parser.add_argument(\"--dr_params_set\", type=str, default=\"IP_large_range\")\n\n parser.add_argument(\"--mod_env_params\", type=str2bool, default=False)\n parser.add_argument(\"--param_mod_instructions\", type=eval, default=[])\n\n parser.add_argument(\"--unity\", type=str2bool, default=False)\n parser.add_argument(\"--unity_editor\", type=str2bool, default=False)\n parser.add_argument(\"--virtual_display\", type=str, default=\":1\")\n parser.add_argument(\"--port\", type=int, default=4000)\n\n # FetchReach action\n parser.add_argument(\"--action_rotation_degrees\", type=float, default=0.0)\n parser.add_argument(\"--action_z_bias\", type=float, default=0.0)", "def _get_init_args(self):\n\n return dict(enum=self.enum, dflt=self._defname,\n base=self.base, shape=self.shape)", "def setup_args(**kargs):\n args = [get_nupack_exec_path(kargs['exec_name']),\n '-material', kargs['material'], '-sodium', kargs['sodium'],\n '-magnesium', kargs['magnesium'], '-dangles', kargs['dangles'], '-T', kargs['T']]\n if kargs['multi']: args += ['-multi']\n if kargs['pseudo']: args += ['-pseudo']\n return args", "def get_args():\n parser = ArgumentParser(description='main interface to provision system')\n parser.add_argument('--region-list', help='list of regions for provisioning purposes',\n required=True, nargs='+')\n parser.add_argument('--outfile', help='file to save region secrets to', required=True)\n args = parser.parse_args()\n return args.region_list, args.outfile", "def arguments_base(token):\n return Arguments(\n verbose=TEST_VERBOSE,\n token=token,\n slug=TEST_SLUG,\n tag=TEST_TAG,\n body=TEST_BODY,\n rel_name=TEST_REL_NAME,\n commitish=TEST_COMMITISH\n )", "async def set_args(self, **kwargs):\n self.original_arguments = kwargs\n Args = namedtuple('Args', [k for k, v in self.arguments.items()])\n Args.__new__.__defaults__ = (None,) * len(self.arguments.items())\n\n valid = {}\n for k, arg in self.arguments.items():\n val = kwargs.get(k, None)\n if val is None and arg.required:\n raise Exception('{0} is required'.format(k))\n\n if arg.options and val not in arg.options:\n raise Exception('{0} provided for {1}. Expected {2}'.format(\n val,\n k,\n arg.options\n ))\n\n if callable(arg.validator):\n val = arg.validator(val, k)\n\n valid[k] = val\n\n self._args = Args(**valid)", "def arguments(**kw):\n return export_arguments('cc', _all_arguments, _groups, **kw)", "def cmake_args(self):\n args = [\n self.define(\"CMAKE_C_COMPILER\", self.spec[\"mpi\"].mpicc),\n self.define(\"BUILD_SHARED_LIBS\", True),\n self.define(\"BUILD_TESTING\", self.run_tests),\n ]\n return args", "def getSpawnArgs(self):\r\n _args = [\r\n executable, # path to python executable e.g. 
/usr/bin/python\r\n ]\r\n if not self.options['loud']:\r\n _args += ['-W', 'ignore']\r\n _args += [\r\n 'manage.py',\r\n 'hx',\r\n 'start',\r\n '--http_port', str(self.options['http_port']),\r\n '--https_port', str(self.options['https_port']),\r\n '--cache_port', str(self.options['cache_port']),\r\n '--workers', '0',\r\n '--fd', pickle.dumps(self.fds),\r\n ]\r\n if self.is_secure:\r\n _args += [\r\n '--key', self.options.get('key'),\r\n '--cert', self.options.get('cert')\r\n ]\r\n if self.options['nocache']:\r\n _args.append('--nocache')\r\n if self.options['dev']:\r\n _args.append('--dev')\r\n if self.options['traceback']:\r\n _args.append('--traceback')\r\n if self.options['global_cache']:\r\n _args.append('--global_cache')\r\n if not self.use_settings:\r\n _args += ['--wsgi', self.options['wsgi']]\r\n return _args", "def __init__(self, resource, *args):\n self.args = list(args)\n self.flags = OrderedDict()\n self.additional_flags = []\n self._AddCommonFlags(resource)", "def cmd_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--image\",\n help=\"Full image path can be optionally supplied.\")\n args = parser.parse_args()\n return args", "def __init__(self, env, system=None):\n self._env = env\n self._system = system if system is not None else {}", "def generate_init_args(self):\n return self.generator(self.constructor_spec)", "def build_arg_list(fn, env):\r\n kw = {}\r\n argspec = inspect.getargspec(fn)\r\n\r\n # if there is a **kw argument in the fn definition,\r\n # just pass along the environment\r\n if argspec[2]:\r\n kw = env\r\n #else for each entry in the arglist set the value from the environment\r\n else:\r\n #skip self\r\n argnames = argspec[0][1:]\r\n for name in argnames:\r\n if name in env:\r\n kw[name] = env[name]\r\n return kw", "def CreateArgs(run_task_request, args):\n if getattr(args, \"ARGS\", None):\n args_ref = dataplex_api.FetchExecutionSpecArgs(args.ARGS)\n if len(args_ref) > 0:\n return run_task_request.ArgsValue(\n additionalProperties=[\n run_task_request.ArgsValue.AdditionalProperty(\n key=key, value=value\n )\n for key, value in sorted(args_ref.items())\n ]\n )\n return None", "def _create_extra_environment(self):\n return {}", "def get_arguments():\n parser = argparse.ArgumentParser(description=\"TODO\")\n \n parser.add_argument('config_filepath', \n action='store', \n type=str, \n help='Path to configuration file containing paths of third parties libraries, projects, data directories, etc. See README for more information.')\n\n parser.add_argument('-C', '--config_cases', \n action='store',\n type=str,\n dest=\"config_cases\",\n help='Path to configuration file containing cases. 
The default one is stored at dask_io_experiments/experiment_5/cases.json',\n default=\"./dask_io_experiments/experiment_5/cases.json\")\n\n return parser.parse_args()", "def init_args(self):\n return {\n \"doc\": self.__doc__.format(name=colored(self.module_name, \"green\", attrs=['bold','underline'])),\n \"Url\": \"set a target url\",\n 'Type': \"set type to check , [php, asp, aspx, cgi, dir , mdb]\",\n }", "def get_arguments() -> argparse.Namespace:\n\n parser = argparse.ArgumentParser(\n description=\"\"\"\n train a network for image classification with Flowers Recognition Dataset.\n \"\"\"\n )\n parser.add_argument(\"config\", type=str, help=\"path of a config file\")\n parser.add_argument(\n \"--resume\",\n action=\"store_true\",\n help=\"Add --resume option if you start training from checkpoint.\",\n )\n parser.add_argument(\n \"--use_wandb\",\n action=\"store_true\",\n help=\"Add --use_wandb option if you want to use wandb.\",\n )\n parser.add_argument(\n \"--debug\",\n action=\"store_true\",\n help=\"Add --debug option if you want to see debug-level logs.\",\n )\n parser.add_argument(\n \"--seed\",\n type=int,\n default=42,\n help=\"random seed\",\n )\n\n return parser.parse_args()", "def create_args():\n return {\n \"username\": fields.String(required=True, validate=validate.Length(min=1)),\n \"email\": fields.Email(required=True),\n \"password\": fields.String(required=True),\n }", "def get_command_env(self) -> Sequence[Mapping[str, str]]:\n return [\n {\"name\": \"DAGSTER_COMPRESSED_EXECUTE_STEP_ARGS\", \"value\": self._get_compressed_args()},\n ]", "def full_args():\n return setup_args()", "def get_argdict(cls, toolchain, args):\n return {} # Empty must be overloaded (if required)", "def get_arguments():\n parser = argparse.ArgumentParser(description='Generates Terraform code from datadog monitors ID numbers')\n parser.add_argument(\"--input\", \"-i\", type=str, required=False, default=\"monitors.json\", help=\"Input JSON filename that contains Monitors ID numbers. i.e monitors.json\")\n parser.add_argument(\"--output\", \"-o\", type=str, required=False, default=\"monitors.tf\", help=\"Output Terraform filename. 
i.e monitors.tf\")\n parser.add_argument(\"--mode\", \"-m\", type=str, required=False, choices=[\"w\",\"a\"], default=\"w\", help=\"Create new Terraform file or Append to existing one.\")\n parser.add_argument(\"--all\", action=\"store_true\", help=\"Create Terraform files per group.\")\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"Verbose output for the script.\")\n\n return parser.parse_args()", "def arg_list():\n arg_list = [\n ['-d', '--domain', 'Specify the domain you are using'],\n ['-t', '--template-path', 'Specify template path'],\n ['-s', '--secrets-path', 'Specify template path'],\n ['-p', '--project', 'Specify a project name'],\n ['-c', '--cloud-platform', 'Specify the platform used'],\n ['-so', '--secrets-only', 'Generate secrets only'],\n ['-db', '--database-host', 'Specify the database host'],\n ['-dbc', '--database-connection-name', 'Specify the database connection name (GCP)'],\n ['-sbn', '--storage-bucket-name', 'Specify storage bucket name'],\n ['-sb', '--storage-backend', 'Specify storage backend s3/gcp/filesystem'],\n ['--acm', '--aws-cert-arn', 'Specify AWS ACM'],\n ['--sg-id', '--aws-alg-sg-id', 'Specify AWS SG ID'],\n ['--sentry', '--senty-dsn', 'Specify Sentry DSN'],\n ['-e', '--environment', 'Specify environment'],\n ['-g', '--gather', 'enable Gather yes or no'],\n ['--cm', '--cert-manager', 'Using cert manager?'],\n ['-m', '--modules', 'Aether modules i.e odk,ui,sync'],\n ['-r', '--redis-url', 'Redis endpoint for CouchDB sync'],\n ['-cdb', '--couchdb-url', 'Redis endpoint for CouchDB sync'],\n ['-gc', '--google-client-id', ' Google client ID for CouchDB sync']\n ]\n return arg_list", "def parse_arguments(cls):\r\n parser = argparse.ArgumentParser(description='Easy Infer for model benchmark')\r\n cls.base_arg_parse(parser)\r\n cls.model_arg_parse(parser)\r\n cls.task_arg_parse(parser)\r\n args = parser.parse_args()\r\n return args", "def get_arguments_string(self):\n result = self.__get_client_server_arg_string('')\n result = self.__get_x_args_string(result)\n result = self.__get_xx_args_string(result)\n result = self.__get_system_property_args_string(result)\n result = self.__get_unsorted_args_string(result)\n return result", "def args(self):\n return (\n self.species_names,\n self.rxn_names,\n self.react_stoic,\n self.prod_stoic,\n self.init_state,\n self.k_det,\n self.chem_flag,\n self.volume,\n )", "def definearguments(self, customparser):\n if not customparser:\n return\n\n add_login_arguments_group(customparser)\n\n customparser.add_argument(\n '--serviceaccount',\n dest='serviceacc',\n action=\"store_true\",\n help=\"Optionally include this flag if you wish to created account \"\\\n \"to be a service account.\",\n default=False\n )\n customparser.add_argument(\n '--addprivs',\n dest='optprivs',\n nargs='*',\n action=_AccountParse,\n type=str,\n help=\"Optionally include this flag if you wish to specify \"\\\n \"which privileges you want added to the iLO account. This overrides the default of \"\\\n \"duplicating privileges of the currently logged in account on the new account. Pick \"\\\n \"privileges from the privilege list in the above help text. EX: --addprivs=1,2,4\",\n default=None\n )\n customparser.add_argument(\n '--removeprivs',\n dest='optprivs',\n nargs='*',\n action=_AccountParse,\n type=str,\n help=\"Optionally include this flag if you wish to specify \"\\\n \"which privileges you want removed from the iLO account. 
This overrides the default of\"\\\n \" duplicating privileges of the currently logged in account on the new account. Pick \"\\\n \"privileges from the privilege list in the above help text. EX: --removeprivs=1,2,4\",\n default=None\n )\n customparser.add_argument(\n '--role',\n dest='role',\n choices=['Administrator', 'ReadOnly', 'Operator'],\n help=\"Optionally include this flag if you would like to specify Privileges by role. \"\\\n \"Valid choices are: Administrator, ReadOnly, Operator\",\n default=None\n )\n customparser.add_argument(\n '-j',\n '--json',\n dest='json',\n action=\"store_true\",\n help=\"Optionally include this flag if you wish to change the\"\\\n \" displayed output to JSON format. Preserving the JSON data\"\\\n \" structure makes the information easier to parse.\",\n default=False\n )", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n arm_template_display_name: Optional[pulumi.Input[str]] = None,\n deployment_properties: Optional[pulumi.Input[pulumi.InputType['EnvironmentDeploymentPropertiesArgs']]] = None,\n lab_name: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n user_name: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def getArguments():\n parser = argparse.ArgumentParser()\n\n # Required arguments\n parser.add_argument('dataset', help='\\\n The directory with the dataset (should have Images/, Annotations/).')\n \n # Optional arguments\n parser.add_argument('--list', default=\"all\", help='\\\n The list of images (ImageSets/{all ; trainval ; test ; ... }.txt).')\n \n return parser.parse_args()", "def __MakeEnvironment(self):\n environment= os.environ.copy()\n\n for key, value in self.__context.items():\n if type(value) is str:\n name = \"QMV_\" + key.replace(\".\", \"__\")\n environment[name]= value\n\n return environment", "def prepare_args(self):\n args = []\n if self.login:\n args.extend(['-L', cfg['tools.hydra.loginfile']])\n if self._port.is_ipv6:\n args.append('-6')\n\n args.extend(['-P', cfg['tools.hydra.passwordfile'], '-s', str(self._port.number), str(self._port.node.ip),\n self.service, ])\n return args", "def __init__(self, *args, **kwargs):\n Cli.__init__(self, *args, **kwargs)\n # Set common arguments\n self.add_argument(\n '--build', env_var=\"BUILD\", default='latest',\n help='Override build id, defaults to latest')\n self.add_argument(\n '--buildroot', env_var=\"BUILD_ROOT\", default='builds',\n help='Build directory')\n self.add_argument(\n '--schema', env_var=\"META_SCHEMA\",\n default='/usr/lib/coreos-assembler/v1.json',\n help='Schema to use. 
Set to NONE to skip all validation')", "def add_arguments(cls):\n return [\n (('--yes',), dict(action='store_true', help='clean .git repo')),\n (('--variable', '-s'),\n dict(nargs='+', help='set extra variable,format is name:value')),\n (('--skip-builtin',),\n dict(action='store_true', help='skip replace builtin variable')),\n\n (('--dir',), dict(nargs='?', default=os.getcwd(),\n help='set working directory')),\n (('--debug',), dict(action='store_true', help='open debug mode')),\n (('--dry-run',), dict(action='store_true',\n help='print command instead execute it')),\n (('--verbose', '-v'), dict(action='count')),\n ]", "def get_args():\n # create the parser\n parser = argparse.ArgumentParser()\n # Add the arguments to be parsed\n parser.add_argument(\"--num_rollouts\", type=int, default=1, help=\"Number of times to rollout agent in env\")\n parser.add_argument(\"--render\", choices=('True','False'), help=\"Render the rollout\")\n parser.add_argument(\"--seed\", type=int, default=4)\n parser.add_argument(\"--x_thresh\", type=float, default=1.5)\n args = parser.parse_args()\n args.render = True if args.render == 'True' else False\n\n return args", "def init_args():\n parser = argparse.ArgumentParser(\n description=\"DeltaSherlock Client software.\")\n parser.add_argument('-v', '--version', action='version', version=VERSION)\n parser.add_argument('-c', '--config', action='store', dest='config_file',\n default='./config.ini', help=\"Path to config file. [default: \\\n %(default)s]\")\n parser.add_argument('-d', '--daemon', action='store_true', dest='daemon',\n default=False, help=\"Run in daemon mode. [default: \\\n %(default)s]\")\n return parser.parse_args()", "def build_arguments(self, *cmd_args, **cmd_kwargs):\n args = []\n args.extend(cmd_args)\n\n for raw_key, value in cmd_kwargs.items():\n if len(raw_key) == 1:\n args.append('-{}'.format(raw_key))\n else:\n key = raw_key.replace('_', '-')\n args.append('--{}'.format(key))\n\n if value is True:\n # If True, it is enough.\n # e.g.: system=True translates to --system\n continue\n\n args.append(str(value))\n\n return args", "def sysArgs(arguments):\n\n # if no args print usage\n if not arguments:\n print 'usage: [--auto] [--manual user_ID server_IP server_Port]'\n sys.exit()\n\n # --auto flag\n if arguments[0] == '--auto':\n return (USER_NAME, SERVER_HOST, SERVER_PORT)\n\n # --manual flag\n if arguments[0] == '--manual':\n return (arguments[1], arguments[2], int(arguments[3]))", "def parse_args():\n parser = argparse.ArgumentParser(\n description='Handover of environment properties')\n parser.add_argument('broker_host',\n help='hostname/ip of the kafka broker')\n parser.add_argument('kafka_topic',\n help='name of the kafka topic to store the messages')\n return parser.parse_args()", "def environment(self) -> Optional[pulumi.Input['EnvironmentArgs']]:\n return pulumi.get(self, \"environment\")", "def envs(self, envs):\n self._instructions_setter('ENV', envs)", "def setup(self):\r\n \r\n if self.requestedAction == admin.ACTION_EDIT or self.requestedAction == admin.ACTION_CREATE:\r\n \r\n # Set the required parameters\r\n for arg in RadiusAuthRestHandler.REQUIRED_PARAMS:\r\n self.supportedArgs.addReqArg(arg)\r\n \r\n # Set up the valid parameters\r\n for arg in RadiusAuthRestHandler.VALID_PARAMS:\r\n if arg not in RadiusAuthRestHandler.REQUIRED_PARAMS:\r\n self.supportedArgs.addOptArg(arg)", "def _render_args(self, target, output_dir):\n args = []\n\n # Glossary of used aapt flags. 
Aapt handles a ton of action, this will continue to expand.\n # : 'package' is the main aapt operation (see class docstring for more info).\n # : '-m' is to \"make\" a package directory under location '-J'.\n # : '-J' Points to the output directory.\n # : '-M' is the AndroidManifest.xml of the project.\n # : '-S' points to the resource_dir to \"spider\" down while collecting resources.\n # : '-I' packages to add to base \"include\" set, here it is the android.jar of the target-sdk.\n args.extend([self.aapt_tool(target.build_tools_version)])\n args.extend(['package', '-m', '-J', output_dir])\n args.extend(['-M', target.manifest.path])\n args.extend(['-S', target.resource_dir])\n args.extend(['-I', self.android_jar_tool(target.manifest.target_sdk)])\n args.extend(['--ignore-assets', self.ignored_assets])\n logger.debug('Executing: {0}'.format(' '.join(args)))\n return args", "def get_arguments():\n parser = argparse.ArgumentParser(description=\"Factorized Spatial Embeddings\")\n parser.add_argument(\"--mode\", default=MODE, choices=[\"train\", \"test\"])\n parser.add_argument(\"--batch_size\", type=int, default=BATCH_SIZE,\n help=\"Number of images sent to the network in one step.\")\n parser.add_argument(\"--input_dir\", type=str, default=DATA_DIRECTORY,\n help=\"Path to the directory containing the training or testing images.\")\n parser.add_argument(\"--K\", type=int, default=LANDMARK_N,\n help=\"Number of landmarks.\")\n parser.add_argument(\"--scale_size\", type=int, default=SCALE_SIZE,\n help=\"Scale images to this size before cropping to CROP_SIZE\")\n parser.add_argument(\"--crop_size\", type=int, default=CROP_SIZE,\n help=\"CROP images to this size\")\n parser.add_argument(\"--checkpoint\", default=CHECKPOINT,\n help=\"Directory with checkpoint to resume training from or use for testing\")\n parser.add_argument(\"--output_dir\", default=OUTPUT_DIR,\n help=\"Where to put output files\")\n parser.add_argument(\"--img_folder\",type=str, default='images',help=\"save the predicted landmarks\")\n \n return parser.parse_args()", "def final_kwargs(self, **kwargs):\n # If `env` has been provided, inject a full copy of the local\n # environment, with the values in `env` overriding the local\n # environment.\n try:\n extra_env = kwargs.pop('env')\n kwargs['env'] = self.command.os.environ.copy()\n kwargs['env'].update(extra_env)\n except KeyError:\n # No explicit environment provided.\n pass\n\n # If `cwd` has been provded, ensure it is in string form.\n try:\n cwd = kwargs.pop('cwd')\n kwargs['cwd'] = str(cwd)\n except KeyError:\n pass\n\n return kwargs", "def get_arguments():\n\n # Creates the ArgumentParser\n parser = argparse.ArgumentParser(\n usage='Optimizes a boolean-based ensemble using Univariate Marginal Distribution Algorithm.')\n\n # Adds a dataset argument with pre-defined choices\n parser.add_argument('dataset', help='Dataset identifier', choices=['RSDataset', 'RSSCN7', 'UCMerced_LandUse'])\n\n # Adds a descriptor argument with pre-defined choices\n parser.add_argument('descriptor', help='Descriptor identifier', choices=['global', 'cnn', 'all'])\n\n # Adds an identifier argument to the desired fold identifier\n parser.add_argument('fold', help='Fold identifier', type=int, choices=range(1, 6))\n\n # Adds an identifier argument to the desired number of agents\n parser.add_argument('-n_agents', help='Number of meta-heuristic agents', type=int, default=10)\n\n # Adds an identifier argument to the desired number of iterations\n parser.add_argument('-n_iter', help='Number of 
meta-heuristic iterations', type=int, default=10)\n\n return parser.parse_args()", "def get_kwargs():\n\treturn get_kwargs_raw(sys.argv)", "def create_training_args(self, input_dict, output_dict, exec_properties,\n executor_class_path, training_inputs,\n job_id) -> Dict[Text, Any]:\n pass", "def setup_args(cls) -> ParlaiParser:\n # we want to later deprecate this for add_cmdline_args", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def SetupEnvironment(self):\n pass", "def configure_args(self):\n super(InstaApriori, self).configure_args()\n self.add_passthru_arg('-iteration', type=int, help=\"The current iteration. Not used as a command line argument\")\n self.add_passthru_arg('--k', type=int, default=3, help=\"Specify the maximum size of itemsets to find\")\n self.add_passthru_arg('--s', type=float, help=\"Specify the minimum support threshold\")\n self.add_passthru_arg('--c', type=float, default=0, help=\"Specify the minimum confidence threshold\")\n self.add_file_arg('--f', default='frequent.txt',\n help=\"Specify the name of the file used to store frequent itemsets\")", "def __init__(__self__, *,\n agent_pool: Optional[pulumi.Input[str]] = None,\n apms: Optional[pulumi.Input[Sequence[pulumi.Input['ApmReferenceArgs']]]] = None,\n builder: Optional[pulumi.Input[str]] = None,\n certificates: Optional[pulumi.Input[Sequence[pulumi.Input['CertificateReferenceArgs']]]] = None,\n env: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n relative_path: Optional[pulumi.Input[str]] = None,\n resource_requests: Optional[pulumi.Input['BuildResourceRequestsArgs']] = None):\n if agent_pool is not None:\n pulumi.set(__self__, \"agent_pool\", agent_pool)\n if apms is not None:\n pulumi.set(__self__, \"apms\", apms)\n if builder is not None:\n pulumi.set(__self__, \"builder\", builder)\n if certificates is not None:\n pulumi.set(__self__, \"certificates\", certificates)\n if env is not None:\n pulumi.set(__self__, \"env\", env)\n if relative_path is not None:\n pulumi.set(__self__, \"relative_path\", relative_path)\n if resource_requests is not None:\n pulumi.set(__self__, \"resource_requests\", resource_requests)", "def generic_env_configure_vars(self, verbose=False):\n\n if self.settings.os == \"Windows\":\n self.output.fatal(\"Cannot build on Windows, sorry!\")\n return\n\n if self.settings.os == \"Linux\" or self.settings.os == \"Macos\":\n libs = 'LIBS=\"%s\"' % \" \".join([\"-l%s\" % lib for lib in self.deps_cpp_info.libs])\n ldflags = 'LDFLAGS=\"%s\"' % \" \".join([\"-L%s\" % lib for lib in self.deps_cpp_info.lib_paths]) \n archflag = \"-m32\" if self.settings.arch == \"x86\" else \"\"\n cflags = 'CFLAGS=\"-fPIC %s %s\"' % (archflag, \" \".join(self.deps_cpp_info.cflags))\n cpp_flags = 'CPPFLAGS=\"%s %s\"' % (archflag, \" \".join(self.deps_cpp_info.cppflags))\n command = \"env %s %s %s %s\" % (libs, ldflags, cflags, cpp_flags)\n # elif self.settings.os == \"Windows\" and self.settings.compiler == \"Visual Studio\":\n # cl_args = \" \".join(['/I\"%s\"' % lib for lib in self.deps_cpp_info.include_paths])\n # lib_paths= \";\".join(['\"%s\"' % lib for lib in self.deps_cpp_info.lib_paths])\n # command = \"SET LIB=%s;%%LIB%% && SET CL=%s\" % (lib_paths, 
cl_args)\n # if verbose:\n # command += \" && SET LINK=/VERBOSE\"\n \n return command", "def _initialize_from_cookiecutter_args(self, cookiecutter_args: dict[str, str]):\n self.golden_tests = cookiecutter_args[\"add_golden\"] == \"y\"\n self.github_owner = cookiecutter_args[\"github_owner\"]\n # Allow copyright holder and copyright year to be missing in the cookiecutter\n # args. Fallback to VSHN AG <info@vshn.ch> and the current year here.\n self.copyright_holder = cookiecutter_args.get(\n \"copyright_holder\", \"VSHN AG <info@vshn.ch>\"\n )\n self.copyright_year = cookiecutter_args.get(\"copyright_year\")\n if \"test_cases\" in cookiecutter_args:\n self.test_cases = cookiecutter_args[\"test_cases\"].split(\" \")\n else:\n self.test_cases = [\"defaults\"]\n\n return False", "def init_args():\n parser = argparse.ArgumentParser(description='Create xls for Tom')\n parser.add_argument('start', metavar='N', type=int, help='starting '\n 'number')\n parser.add_argument('total_x', metavar='N', type=int,\n help='total number of x rows')\n parser.add_argument('total_y', metavar='N', type=int,\n help='total number of y columns')\n parser.add_argument('filename', metavar='NAME', default='test.csv',\n type=str, help='file name to write to, should end in '\n 'csv')\n return parser.parse_args()", "def set_args():\n # Initialise argparse object\n parser = argparse.ArgumentParser(description='Set some arguments for our script')\n # Add some arguments, elements are: short form name, long form name, type of input expected\n # default value if you don't set an argument, help string (shown if you run with --help)\n # nargs is so that we can define multiple values for a single argument\n\n parser.add_argument('-q', '--query-terms', type=str, default='Venezuela Covid',\n help='list of strings to search for', nargs='*')\n\n parser.add_argument('-p', '--page-limit', type=int,\n help='number to limit search pages to')\n\n # set the argument parser and return\n args = parser.parse_args()\n return args", "def build_args(self, parser):\n raise NotImplementedError('build_args() must be implemented')", "def define_options(self):\n\n from clinica.engine.cmdparser import PIPELINE_CATEGORIES\n\n clinica_comp = self._args.add_argument_group(PIPELINE_CATEGORIES['CLINICA_COMPULSORY'])\n clinica_comp.add_argument(\"caps_directory\",\n help='Path to the CAPS directory.')\n clinica_comp.add_argument(\"list_bvalues\", type=str,\n help='String listing all the shells (i.e. 
the b-values) in the corrected DWI datasets comma separated (e.g, 0,300,700,2200)')\n # Optional arguments\n clinica_opt = self._args.add_argument_group(PIPELINE_CATEGORIES['CLINICA_OPTIONAL'])\n\n clinica_opt.add_argument(\"-wd\", \"--working_directory\",\n help='Temporary directory to store pipeline intermediate results')\n clinica_opt.add_argument(\"-np\", \"--n_procs\", type=int, default=4,\n help='Number of cores used to run in parallel')\n clinica_opt.add_argument(\"-tsv\", \"--subjects_sessions_tsv\",\n help='TSV file containing a list of subjects with their sessions.')", "def __init__(__self__,\n resource_name: str,\n args: ApplicationArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: ApplicationArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: ApplicationArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def _generate_environment(self):\n envvars = {}\n for key in self.envvars:\n try:\n envvars[key] = os.environ[key]\n except KeyError:\n continue\n\n # Warn the user that we cannot support secrets\n if envvars:\n logger.warning(\"This API does not support environment secrets.\")\n return envvars", "def get_arguments():\n parser = argparse.ArgumentParser(description=\"DeepLab-ResNet Network\")\n parser.add_argument(\"--mode\", choices={\"SUM\", \"VAL\"}, default=\"VAL\", help=\"\")\n parser.add_argument(\"--sdf-path\", type=str, default=SDF_PATH, help=\"\")\n parser.add_argument(\"--summary-file\", type=str, default=SUMMARY_FILE, help=\"\")\n\n return parser.parse_args()", "def command_line_arguments():\n _parser.add_argument('-l', '--list', nargs='+',\n help='<Required> Set flag', required=True)\n _parser.add_argument(\"-A\", \"--access\", required=True,\n help=\"access to host => grant/revoke\")", "def Args(parser):\n flags.AddRegion(parser)\n flags.AddCluster(parser)", "def init():\n env = Environment(5, 5, 20, [10, 20, 10, 5])\n return env", "def setup_args():\n parser = argparse.ArgumentParser(\n description=\"Take probe set and generate MSA for all variants for \"\n \"each gene\")\n\n parser.add_argument(\n \"-o\", \"--output_path\",\n help=\"Directory to save the output to. 
Default: Current Directory\",\n type=str, default='.')\n\n parser.add_argument(\n \"-p\", \"--probe\",\n help=\"Path to the probe fasta.\",\n type=str,\n required=True)\n\n parser.add_argument(\n \"-g\", \"--gene_refs\",\n help=\"Directory where gene references are located.\",\n required=True,\n type=str)\n\n args = parser.parse_args()\n return args", "def templateargs(self, target_jar, confs=None):\r\n raise NotImplementedError()", "def _add_arguments(self):\r\n self._parser.add_argument(\r\n '-s', '--server',\r\n required=True,\r\n help=\"enter server name\")\r\n self._parser.add_argument(\r\n '-db', '--database',\r\n required=True,\r\n help='enter database name')\r\n self._parser.add_argument(\r\n '-u', '--username',\r\n help='enter username')\r\n self._parser.add_argument(\r\n '-p', '--password',\r\n help='enter password')\r\n #self._parser.add_argument(\r\n # '-h', '--help',\r\n # help='show this help message and exit')\r", "def __init__(self, metadata, environment, component, image, version, s3_bucket, exec_env):\n self.environment = environment\n self.component = component\n self.s3_bucket = s3_bucket\n self.exec_env = exec_env\n self.image = image\n self.version = version\n self.metadata = metadata\n\n # generate Terragrunt config as part of object initialisation\n self.config()", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n application_name: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n resource_group_id: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n __props__=None):\n ...", "def get_model_args(args):\r\n global MODEL_ARCHITECTURE, MODEL_OPTIMIZER, ADVANCED_OPTIONS, \\\r\n DATA_OPTIONS, BERT_CONFIG\r\n\r\n required_args = MODEL_ARCHITECTURE | MODEL_OPTIMIZER | ADVANCED_OPTIONS \\\r\n | DATA_OPTIONS | BERT_CONFIG\r\n\r\n arg_values = {k: v for k, v in vars(args).items() if k in required_args}\r\n return argparse.Namespace(**arg_values)" ]
[ "0.61305785", "0.61261475", "0.6043725", "0.6030461", "0.5991547", "0.59570146", "0.58903545", "0.58040184", "0.5788831", "0.5748878", "0.5733352", "0.5728621", "0.5659946", "0.5640539", "0.56395596", "0.5638727", "0.5636724", "0.5635059", "0.56332546", "0.56139416", "0.5601574", "0.55852455", "0.5581533", "0.5576569", "0.55711496", "0.5567551", "0.55603194", "0.552644", "0.5524095", "0.55148804", "0.55050147", "0.549587", "0.54946315", "0.549269", "0.5491758", "0.54860055", "0.5482095", "0.54782206", "0.5471729", "0.5469217", "0.5464437", "0.5463652", "0.5461709", "0.5430272", "0.54191846", "0.540036", "0.53973335", "0.5388716", "0.5375427", "0.5367907", "0.535995", "0.5341682", "0.53408206", "0.5339632", "0.5330408", "0.5322179", "0.531847", "0.53121877", "0.531159", "0.53092825", "0.53068185", "0.5305282", "0.5294373", "0.5292967", "0.52789193", "0.5275688", "0.52741736", "0.5266547", "0.5263297", "0.5257347", "0.5256848", "0.52533597", "0.52533597", "0.52533597", "0.52533597", "0.52533597", "0.52533597", "0.5245208", "0.5243965", "0.52422965", "0.5241939", "0.52382404", "0.5236894", "0.5236145", "0.52354634", "0.52339196", "0.52221", "0.52221", "0.52221", "0.52204823", "0.52143806", "0.5209754", "0.5208163", "0.52071846", "0.52032256", "0.52003235", "0.51974094", "0.51910985", "0.5190827", "0.5184558" ]
0.5898593
6
The name of the application that is associated with this environment.
def application_name(self) -> pulumi.Input[str]: return pulumi.get(self, "application_name")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def app_name(self) -> str:\n return self._app_name", "def app_name(self):\n return self._app_name", "def application_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_name\")", "def app_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"app_name\")", "def app_name(self): # pylint:disable=function-redefined\n return self._app_name", "def get_name():\n return config.APP_NAME", "def application_name(self) -> Optional[str]:\n return pulumi.get(self, \"application_name\")", "def get_app_name(self):\n return getattr(self, '_app_name', None)", "def app_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_name\")", "def application_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_name\")", "def app(self) -> str:\n return pulumi.get(self, \"app\")", "def name(self):\n return self._env_name", "def name(self):\n return self.application_tree['name']", "def _app(self) -> str:\n return self.charm.app.name", "def _get_app_name(app):\n return app[APP_NAME_KEY]", "def app_name(self):\n module_filepath = inspect.getfile(type(self))\n parent_dir = os.path.dirname\n app_dirpath = parent_dir(parent_dir(parent_dir(module_filepath)))\n app_name = os.path.basename(app_dirpath)\n return app_name", "def getApplicationName(self) -> unicode:\n ...", "def _get_app_name(self):\n # TODO move app name into pyglet.app (also useful for OS X menu bar?).\n return sys.argv[0]", "def get_name(self, name):\n return self.apps[name]['name']", "def name(self):\n return get_env_name(self.tool_name,\n self._python,\n self._requirements,\n self._tagged_env_vars)", "def current_app(self) -> str:\n app_id = self.app.get_current() # Returns the application ID (string) of the\n foreground_app = [x for x in self.app.list_apps() if app_id == x[\"id\"]][0]\n return foreground_app['title']", "def module_name(self) -> str | None:\n try:\n return self._app_name.replace(\"-\", \"_\")\n except AttributeError:\n # If the app was created from an interactive prompt,\n # there won't be a module name.\n return None", "def _app_id(self):\n return '{}-{}'.format(self.config['app']['name'],\n self.config['app']['version'])", "def app_name(self, value):\n self._app_name = value", "def app_name(self):\n return self._chromecast.app_display_name if self._chromecast else None", "def name(self):\r\n if self._name is not None:\r\n return self._name\r\n else:\r\n try:\r\n return Inspection.find_application_name()\r\n # TODO(wickman) Be more specific\r\n except Exception:\r\n return 'unknown'", "def environment_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"environment_name\")", "def get_application_name(self, feed_id):\r\n return self._handler.get_application_name(feed_id)", "def fallback_application_name() -> str:\n # Import here instead of at the top to avoid an ImportError caused by an\n # import cycle. This can be removed once the import graph of id3c.cli is\n # less tangled.\n from ..cli.utils import running_command_name\n\n # \"The application_name can be any string of less than NAMEDATALEN\n # characters (64 characters in a standard build).\"¹\n #\n # psycopg2 / libpq will truncate for us, but they will issue a NOTICE log\n # message if they do. 
Avoid the cluttery notice by truncating ourselves.\n #\n # ¹ https://www.postgresql.org/docs/current/runtime-config-logging.html#GUC-APPLICATION-NAME\n max_len = 64\n appname = running_command_name()\n\n return shorten(appname, max_len, \"...\")", "def environment_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"environment_name\")", "def environment_label(self) -> str:\n return self._environment_label", "def programName(self):\n return self._parser.prog", "def product(self):\n return self.appName", "def name(self):\n\n return self.manifest[\"name\"]", "def app_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"app_id\")", "def app_id(self):\n return self._app_id or self._modules['default'].data.get('application')", "def _DefaultAppId():\n return os.getenv('APPLICATION_ID', '_')", "def call_name(self):\n return str(self.executable.name)", "def master_name(self):\n return self._LAUNCHPAD_NAME", "def application_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_id\")", "def get_app_label(app_module):\n return app_module.__name__.split('.')[-1]", "def appName(self, name: str) -> \"SparkSession.Builder\":\n return self.config(\"spark.app.name\", name)", "def app_id(self) -> str:\n return self._app_id", "def _generateApplicationName(self, obj, **args):\n result = []\n try:\n result.append(obj.getApplication().name)\n except:\n pass\n return result", "def get_app_name(i):\n return app_id + '-' + str(i)", "def app_label(cls):\n return cls.model_meta.app_label", "def get_app(self):\n\n app = APP\n return app", "def env_name(self):\n return f\"{self.project_name}-{self.stage}\"", "def get_application(self):\n return self._silva_root", "def name(self):\n return self._config.get(CONF_NAME)", "def app_names(self):\n return self.get_app_names()", "def application(self):\n\n if not self._applicationDef:\n raise NotValidPlatformException(\n 'No application definition is available. 
Are you sure you are running on Platform.sh?'\n )\n return self._applicationDef", "def application_id(self) -> Optional[str]:\n return pulumi.get(self, \"application_id\")", "def app_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"app_id\")", "def stackname(self):\n return self.BASE_NAME.format(**self.conf)", "def name(self):\n return self.config[\"name\"]", "def get_env_name(self):\n if self.options.environment:\n return self.options.environment\n elif os.environ.get(\"JUJU_ENV\"):\n return os.environ['JUJU_ENV']\n\n env_ptr = os.path.join(self.juju_home, \"current-environment\")\n if os.path.exists(env_ptr):\n with open(env_ptr) as fh:\n return fh.read().strip()\n\n with open(self.get_env_conf()) as fh:\n conf = yaml.safe_load(fh.read())\n if not 'default' in conf:\n raise ConfigError(\"No Environment specified\")\n return conf['default']", "def name(self):\n return self._config.backend_name", "def get_name():\n return __name__", "def getApp(self):\n return self.serviceClass.app", "def getWindowName(self):\n return self.__windowName", "def name(self):\n return self.appliance_name", "def package_name(self) -> str:\n return pulumi.get(self, \"package_name\")", "def name(self):\n return self._config_name", "def application(self):\n return self._application", "def application(self):\n return self._application", "def app_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_id\")", "def name(self) -> str:\n name = self._config[\"name\"]\n assert isinstance(name, str) # noqa: S101\n return name", "def get_app_hostname():\n if not is_running_on_app_engine() or is_running_on_localhost():\n return None\n\n version = modules.get_current_version_name()\n app_id = app_identity.get_application_id()\n\n suffix = 'appspot.com'\n\n if ':' in app_id:\n tokens = app_id.split(':')\n api_name = tokens[1]\n if tokens[0] == 'google.com':\n suffix = 'googleplex.com'\n else:\n api_name = app_id\n\n # Check if this is the default version\n default_version = modules.get_default_version()\n if version == default_version:\n return '{0}.{1}'.format(app_id, suffix)\n else:\n return '{0}-dot-{1}.{2}'.format(version, api_name, suffix)", "def _extract_appname(self, log):\n appname = \"\"\n if \"appLaunch\" in log:\n appname = log[\"appLaunch\"][\"appName\"]\n else:\n self.logger.info(\"no applaunch field\")\n self.logger.info(log[\"event\"])\n pass \n \n return appname", "def application_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_id\")", "def get_name(app):\n from uuid import uuid4 as uuid\n return (f'accelpy_{app[\"application\"][\"product_id\"]}'\n f'_{str(uuid()).replace(\"-\", \"\")[:8]}')", "def _get_environment(cls):\n return cls.__name__.lower()", "def _get_base_app_name(value):\n value = os.path.basename(value)\n if (\n value.endswith(\".exe\")\n or value.endswith(\".dll\")\n or value.endswith(\".so\")\n ):\n value = os.path.splitext(value)[0]\n\n return value", "def name(cls):\n\n system = platform.system()\n\n # Apply system map\n if system in NAME_MAP:\n system = NAME_MAP[system]\n\n return system", "def name(self) -> str:\n return self.dev.label", "def __repr__(self):\n return '<Application({name})>'.format(name=self.name)", "def name(self):\n return self._path or '__main__'", "def python_name(self):\n return self.requirement.name", "def app_id(self):\n return self._app_id", "def set_name(self, application_name):\r\n self._name = application_name", "def get_wsgi_file_name(self):\n return self.wsgi", "def application_arn(self) -> Optional[str]:\n 
return pulumi.get(self, \"application_arn\")", "def name(self):\n # This is how PIDs 0 and 4 are always represented in taskmgr\n # and process-hacker.\n if self.pid == 0:\n return \"System Idle Process\"\n if self.pid == 4:\n return \"System\"\n return os.path.basename(self.exe())", "def package_name(self):\n return self._package_name", "def name(self):\r\n return self.setuptools_requirement.project_name", "def env_name(pre_chars='(', post_chars=')'):\n env_path = builtins.__xonsh_env__.get('VIRTUAL_ENV', '')\n if len(env_path) == 0 and xp.ON_ANACONDA:\n env_path = builtins.__xonsh_env__.get('CONDA_DEFAULT_ENV', '')\n env_name = os.path.basename(env_path)\n if env_name:\n return pre_chars + env_name + post_chars", "def application_object_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_object_id\")", "def get_raw_server_name():\n from google.appengine.api import app_identity\n return '%s.%s.appspot.com' % (os.environ[\n 'CURRENT_VERSION_ID'].split('.')[0], app_identity.get_application_id())", "def module_name(self):\n return self.name()", "def get_name(self):\n return self.settings.get(\"name\", None)", "def get_process_name(self):\n\n return self._args.t", "def get_name(self) -> str:\n return os.path.split(os.getcwd())[-1]", "def app_label(obj):\n try:\n return lower(obj._meta.object_name)\n except AttributeError:\n return ''", "def server_app_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_app_id\")", "def application_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_id\")" ]
[ "0.8759173", "0.8733051", "0.8620485", "0.8620485", "0.8601236", "0.8559315", "0.8510323", "0.8470934", "0.83924884", "0.8365387", "0.81169367", "0.80879086", "0.8058957", "0.80493975", "0.80454445", "0.80223936", "0.8003628", "0.79939234", "0.7801216", "0.7618629", "0.7484918", "0.7447768", "0.74387485", "0.7420402", "0.7392814", "0.7373414", "0.737086", "0.73540545", "0.73089325", "0.72906715", "0.7228089", "0.7203734", "0.715478", "0.711807", "0.7019559", "0.7000779", "0.6983843", "0.6978822", "0.6926621", "0.68893594", "0.6870236", "0.68542904", "0.6849247", "0.6846084", "0.67824024", "0.6778457", "0.67722195", "0.67273843", "0.6726059", "0.67143023", "0.67108", "0.66936255", "0.66614723", "0.6641312", "0.6621249", "0.66058314", "0.6599743", "0.6588568", "0.65868896", "0.6567537", "0.65568805", "0.6540866", "0.6515158", "0.6507561", "0.64977914", "0.6488593", "0.6488593", "0.64644533", "0.64573604", "0.64571863", "0.6447661", "0.6445232", "0.6444307", "0.6436871", "0.64057", "0.64023125", "0.63975024", "0.6389024", "0.6352953", "0.6341168", "0.6336924", "0.6320092", "0.6313423", "0.6312998", "0.6294923", "0.62785035", "0.6271921", "0.6270897", "0.62690896", "0.62649447", "0.6261067", "0.62599355", "0.62558806", "0.62504154", "0.62385345", "0.623004", "0.622429" ]
0.83143616
13
If specified, the environment attempts to use this value as the prefix for the CNAME in your Elastic Beanstalk environment URL. If not specified, the CNAME is generated automatically by appending a random alphanumeric string to the environment name.
def cname_prefix(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "cname_prefix")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_env_prefix(instrument):\n return \"crds://\"", "def create_r53_name ( base_name, name ) :\n env = get_env_type( base_name )\n if env :\n env = env.lower( )\n if ( env == 'prod' ) :\n return name\n\n return name + '.' + env", "def env_name(self):\n return f\"{self.project_name}-{self.stage}\"", "def env_name(pre_chars='(', post_chars=')'):\n env_path = builtins.__xonsh_env__.get('VIRTUAL_ENV', '')\n if len(env_path) == 0 and xp.ON_ANACONDA:\n env_path = builtins.__xonsh_env__.get('CONDA_DEFAULT_ENV', '')\n env_name = os.path.basename(env_path)\n if env_name:\n return pre_chars + env_name + post_chars", "def env_var_aws_access_key_id():\n return 'AWS_ACCESS_KEY_ID'", "def staging():\n env.hosts = ['staging.example.com']", "def create_env_name(name):\n new_name = re.sub(r'''(?<=[a-z])([A-Z])''', '_\\\\1', name)\n new_name = re.sub(r'\\W+', '_', new_name)\n new_name = re.sub(r'_{2,}', '_', new_name)\n return new_name.upper().strip(\"_\")", "def get_consul_uri():\n if \"CONSUL_HOST\" in os.environ:\n # WARNING! TODO! Currently the env file does not include the port.\n # But some other people think that the port should be a part of that.\n # For now, I'm hardcoding 8500 until this gets resolved.\n return \"http://{0}:{1}\".format(os.environ[\"CONSUL_HOST\"], 8500)\n else:\n raise BadEnviornmentENVNotFound(\"CONSUL_HOST\")", "def env_prefix(self, path):\n if self.is_default:\n return self.root # FIXME: Is this guaranteed to be the right one?\n\n return os.sep.join([path, PROJECT_ENVS_FOLDER,\n self.default_environment])", "def bucket_website_domain_name(self) -> str:\n ...", "def RSA_KEYPAIR_PREFIX() :\n return os.environ.get( \"ATC_KEYPAIR_PREFIX\", \"atc-dev\" )", "def environment_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"environment_name\")", "def cname_prefix(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"cname_prefix\")", "def environment_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"environment_name\")", "def prepend_env(self, env_name, pattern):\n if not self.has_pattern(env_name, pattern):\n if env_name not in self.environ.keys():\n self.environ[env_name] = [pattern]\n else:\n self.environ[env_name].insert(0, pattern)\n if env_name not in self.env_name_changed:\n self.env_name_changed.append(env_name)", "def test_cluster_name_from_environment(env_config):\n args = argparse.Namespace(cfg=os.path.join(TEST_DATA_DIR, 'gcp-defaults.ini'))\n cfg = ElasticBlastConfig(configure(args), task = ElbCommand.SUBMIT)\n\n assert cfg.cluster.results == env_config['ELB_RESULTS']\n assert cfg.cluster.name == env_config['ELB_CLUSTER_NAME']", "def create_dns_name ( base_name, name ) :\n return create_r53_name( base_name, name) + '.mse-esp.com'", "def name(self):\n return self._env_name", "def tracing_name(name: Optional[str] = None) -> str:\n if name is None:\n name = settings.SERVICE_NAME\n return f\"{name}.{settings.ENVIRONMENT.lower()}\"", "def generate_agent_name():\n\n return '{0}-{1}'.format(\n defaults.CLOUDIFY_AGENT_PREFIX,\n uuid.uuid4())", "def swap_cnames(profile, source_environment, destination_environment):\n client = boto3client.get(\"elasticbeanstalk\", profile)\n params = {}\n params[\"SourceEnvironmentName\"] = source_environment\n params[\"DestinationEnvironmentName\"] = destination_environment\n return client.swap_environment_cnames(**params)", "def prepend_environment_variable(self, key, value):\n script_keys = {\n \"k\": key,\n \"v\": value\n }\n script = \"$env:{k} = 
\\\"{v};$env:{k}\\\"\".format(**script_keys)\n self._printer(script)", "def bucket_domain_name(self) -> str:\n ...", "def bucket_dual_stack_domain_name(self) -> str:\n ...", "def get_raw_server_name():\n from google.appengine.api import app_identity\n return '%s.%s.appspot.com' % (os.environ[\n 'CURRENT_VERSION_ID'].split('.')[0], app_identity.get_application_id())", "def cname(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cname\")", "def get_hostname():\n return re.split(\"\\.\", env.host)[0]", "def env_var_aws_secret_access_key():\n return 'AWS_SECRET_ACCESS_KEY'", "def set_dns_cname ( route53_conn, dns_name, cname_value ) :\n r53 = boto.route53.record.ResourceRecordSets( route53_conn, route_53_hosted_zoneid )\n monitor_dns = r53.add_change( 'UPSERT', dns_name, 'CNAME', ttl=60 )\n monitor_dns.add_value( cname_value )\n r53.commit( )", "def cname(self, cname):\n if (\n self.local_vars_configuration.client_side_validation and cname is None\n ): # noqa: E501\n raise ValueError(\n \"Invalid value for `cname`, must not be `None`\"\n ) # noqa: E501\n\n self._cname = cname", "def get_balancer_name(self):\n return '{}-{}'.format(\n self.config['namespace'],\n self.get_current_env(),\n )", "def _generate_cache_key(self, address):\n\n return re.sub(r'[^a-z0-9]', '', str(address).lower())", "def production_url(service_name):\n project_id = os.getenv(\"GOOGLE_CLOUD_PROJECT\")\n project_url = f\"{project_id}.appspot.com\"\n if service_name == \"default\":\n return f\"https://{project_url}\"\n else:\n return f\"https://{service_name}-dot-{project_url}\"", "def prepend_environment_variable(parent, key, value):\n os.environ[key] = \"{0}{1}{2}\".format(str(value),\n os.pathsep,\n os.environ.get(key) or \"\")\n\n if parent:\n parent.prepend_environment_variable(key, value)", "def _get_environment():\n namespace = current_app.config.get('POD_NAMESPACE').lower()\n if namespace.endswith('dev'):\n return 'DEV'\n if namespace.endswith('test'):\n return 'TEST'\n if namespace.endswith('tools'):\n return 'SANDBOX'\n return ''", "def _get_athena_connection_string(db_name_env_var: str = \"ATHENA_DB_NAME\") -> str:\n ATHENA_DB_NAME: Optional[str] = os.getenv(db_name_env_var)\n ATHENA_STAGING_S3: Optional[str] = os.getenv(\"ATHENA_STAGING_S3\")\n\n if not ATHENA_DB_NAME:\n raise ValueError(\n f\"Environment Variable {db_name_env_var} is required to run integration tests against AWS Athena\"\n )\n\n if not ATHENA_STAGING_S3:\n raise ValueError(\n \"Environment Variable ATHENA_STAGING_S3 is required to run integration tests against AWS Athena\"\n )\n\n url = f\"awsathena+rest://@athena.us-east-1.amazonaws.com/{ATHENA_DB_NAME}?s3_staging_dir={ATHENA_STAGING_S3}\"\n\n return url", "def prepend_environment_variable(self, key, value):\n value = BashParentEnvironment._format_environment_value(value)\n script_keys = {\n \"k\": key,\n \"v\": value\n }\n script = \"export {k}=\\\"{v}:${k}\\\"\".format(**script_keys)\n self._printer(script)", "def environment_label(self) -> str:\n return self._environment_label", "def production_url(service_name):\n project_id = os.environ.get('GOOGLE_CLOUD_PROJECT')\n project_url = '{}.appspot.com'.format(project_id)\n if service_name == 'default':\n return 'https://{}'.format(project_url)\n else:\n return 'https://{}-dot-{}'.format(service_name, project_url)", "def bucket_regional_domain_name(self) -> str:\n ...", "def get_url_prefix(config: Mapping[str, Any]) -> str:\n return _sanitize_url_prefix(config.get('url_prefix'))", "def environment_variable_string(self, name):\n 
return \"$(\" + name + \")\"", "def get_ami_keypath ( env_type ) :\n return \"/builds/esp/\" + env_type + \"/current/\"", "def get_env_key(obj, key=None):\n return str.join('_', [obj.__module__.replace('.','_').upper(),\n key.upper()])", "def test_generated_cluster_name(env_config_no_cluster):\n args = argparse.Namespace(cfg=os.path.join(TEST_DATA_DIR, 'gcp-defaults.ini'))\n cfg = ElasticBlastConfig(configure(args), task = ElbCommand.SUBMIT)\n\n assert cfg.cluster.results == TEST_RESULTS_BUCKET\n user = getpass.getuser()\n digest = hashlib.md5(TEST_RESULTS_BUCKET.encode()).hexdigest()[0:9]\n assert cfg.cluster.name == f'elasticblast-{user.lower()}-{digest}'", "def _setup_friendly_environ(environ):\n http_host, host_url = determine_host(environ)\n if http_host == host_url:\n space_name = \"frontpage\"\n else:\n space_name = determine_space(environ, http_host)\n\n recipe_name = determine_space_recipe(environ, space_name)\n environ['wsgiorg.routing_args'][1]['recipe_name'] = recipe_name.encode(\n 'UTF-8')", "def getSiteName():\n return os.environ['SITENAME']", "def get_conda_env_name():\n env_name = os.popen('echo $CONDA_DEFAULT_ENV').read().strip()\n if env_name == '' or env_name == '$CONDA_DEFAULT_ENV':\n env_name = 'base'\n logging.info('Anaconda environment: ' + env_name)\n return env_name", "def get_server_url():\n try:\n url = os.environ['API_HOST']\n # print('[ OK ] Server url loaded: ', url)\n except KeyError:\n url = 'http://localhost:3300/'\n print('[ WARNING ] API_HOST environment variable was not found. default server url was set at: ', url)\n\n return url", "def test_wsgi_script_name_on_aws_url(self):\n lh = LambdaHandler(\"tests.test_wsgi_script_name_settings\")\n\n event = {\n \"body\": \"\",\n \"resource\": \"/{proxy+}\",\n \"requestContext\": {},\n \"queryStringParameters\": {},\n \"headers\": {\n \"Host\": \"1234567890.execute-api.us-east-1.amazonaws.com\",\n },\n \"pathParameters\": {\"proxy\": \"return/request/url\"},\n \"httpMethod\": \"GET\",\n \"stageVariables\": {},\n \"path\": \"/return/request/url\",\n }\n response = lh.handler(event, None)\n\n self.assertEqual(response[\"statusCode\"], 200)\n self.assertEqual(\n response[\"body\"],\n \"https://1234567890.execute-api.us-east-1.amazonaws.com/dev/return/request/url\",\n )", "def _staging():\n env.environment = 'staging'\n env.server_name = 'project-staging.dimagi.com'\n env.hosts = [settings.STAGING_HOST]", "def add_argument(self, *args, **kwargs):\n env_var = kwargs.pop('env_var', None)\n if env_var is not None:\n if not env_var.startswith('COSA_'):\n env_var = f\"COSA_{env_var}\"\n ka = kwargs.get(\"help\", '')\n kwargs['help'] = f\"{ka} (Env: {env_var})\"\n default = kwargs.pop('default', None)\n super().add_argument(\n *args, default=os.environ.get(env_var, default), **kwargs)\n else:\n super().add_argument(*args, **kwargs)", "def test_wsgi_script_name_on_domain_url(self):\n lh = LambdaHandler(\"tests.test_wsgi_script_name_settings\")\n\n event = {\n \"body\": \"\",\n \"resource\": \"/{proxy+}\",\n \"requestContext\": {},\n \"queryStringParameters\": {},\n \"headers\": {\n \"Host\": \"example.com\",\n },\n \"pathParameters\": {\"proxy\": \"return/request/url\"},\n \"httpMethod\": \"GET\",\n \"stageVariables\": {},\n \"path\": \"/return/request/url\",\n }\n response = lh.handler(event, None)\n\n self.assertEqual(response[\"statusCode\"], 200)\n self.assertEqual(response[\"body\"], \"https://example.com/return/request/url\")", "def set_BucketName(self, value):\n super(PutBucketWebsiteRedirectInputSet, 
self)._set_input('BucketName', value)", "def ecr_image_name(dev_account_id, region, component_name, version):\n return '%s.dkr.ecr.%s.amazonaws.com/%s:%s' % (dev_account_id, region, component_name, 'dev' if version is None else version)", "def GetEnvironment(self):\n environ = super(ServiceHandlerTest, self).GetEnvironment()\n if self.remote_host:\n environ['REMOTE_HOST'] = self.remote_host\n if self.server_host:\n environ['SERVER_HOST'] = self.server_host\n return environ", "def get_admin_bucket_name ( location = None, region_name = None ) :\n if region_name :\n location = get_s3_location( region_to_location_map[ region_name ] )\n\n if not location or len( location ) < 1 :\n location = 'us-standard'\n\n return 'admin.mse-esp.com-' + location", "def build_bucket_url(bucket_name) -> str:\n return \"https://s3.console.aws.amazon.com/s3/buckets/{0}\".format(bucket_name)", "def bucket_domain_name(self) -> str:\n return jsii.get(self, \"bucketDomainName\")", "def bucket_domain_name(self) -> str:\n return jsii.get(self, \"bucketDomainName\")", "def SANDBOX(cls):\n\n return DataCenter.Environment(\"https://sandbox.zohoapis.eu\", cls().get_iam_url(), cls().get_file_upload_url())", "def get_name():\n return config.APP_NAME", "def cname(self):\n return self._cname()", "def name(self):\n return get_env_name(self.tool_name,\n self._python,\n self._requirements,\n self._tagged_env_vars)", "def stackname(self):\n return self.BASE_NAME.format(**self.conf)", "def host(self):\r\n return self._environ.get('HTTP_HOST', '')", "def get_hostname(config):\n KEY = os.environ.get(\"DWH_AWS_KEY\")\n SECRET = os.environ.get(\"DWH_AWS_SECRET\")\n redshift = boto3.client('redshift', region_name=\"us-west-2\",\n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET)\n CLUSTER_IDENTIFIER = config.get(\"CLUSTER\", \"CLUSTER_IDENTIFIER\")\n cluster_props = redshift.describe_clusters(\n ClusterIdentifier=CLUSTER_IDENTIFIER)['Clusters'][0]\n endpoint = cluster_props[\"Endpoint\"][\"Address\"]\n return endpoint", "def basic_url(self):\n return self.base_name + '.cloudlabs.rc.ucl.ac.uk'", "def staging():\n env.settings = 'staging'\n env.hosts = ['db.beta.tribapps.com'] \n env.user = 'newsapps'\n env.s3_bucket = 'media-beta.tribapps.com'", "def get_vpc_name ( base_name ) :\n return base_name + '-VPC'", "def platform_config_filename(region, account_prefix, prod):\n return 'infra/platform-config/%s/%s/%s.json' % (\n account_prefix, \"prod\" if prod else \"dev\", region\n )", "def _prefix_env_variable(environ, name, paths, subfolders):\n value = environ[name] if name in environ else ''\n environ_paths = [path for path in value.split(os.pathsep) if path]\n checked_paths = []\n for path in paths:\n if not isinstance(subfolders, list):\n subfolders = [subfolders]\n for subfolder in subfolders:\n path_tmp = path\n if subfolder:\n path_tmp = os.path.join(path_tmp, subfolder)\n # skip nonexistent paths\n if not os.path.exists(path_tmp):\n continue\n # exclude any path already in env and any path we already added\n if path_tmp not in environ_paths and path_tmp not in checked_paths:\n checked_paths.append(path_tmp)\n prefix_str = os.pathsep.join(checked_paths)\n if prefix_str != '' and environ_paths:\n prefix_str += os.pathsep\n return prefix_str", "def getJobName():\n return os.environ['LCATR_JOB']", "def environ_key(name=None):\n try:\n return os.environ[name]\n except KeyError:\n return None", "def create_cdn(tag_prefix, cdn_name=None, elb_domain=None,\n s3_logs_bucket=None,\n tls_priv_key=None, tls_fullchain_cert=None,\n 
region_name=None, dry_run=False):\n if not cdn_name:\n cdn_name = '%scloudfront' % _clean_tag_prefix(tag_prefix)\n cdn_client = boto3.client('cloudfront', region_name='us-east-1')\n domains = []\n\n default_cert_location = None\n if not default_cert_location:\n if tls_priv_key and tls_fullchain_cert:\n resp = _store_certificate(\n tls_fullchain_cert, tls_priv_key,\n tag_prefix=tag_prefix, region_name=region_name,\n dry_run=dry_run)\n default_cert_location = resp['CertificateArn']\n else:\n LOGGER.warning(\"default_cert_location is not set and there are no\"\\\n \" tls_priv_key and tls_fullchain_cert either.\")\n\n try:\n resp = cdn_client.create_distribution(\n DistributionConfig={\n 'CallerReference': datetime.datetime.now(),\n 'DefaultRootObject': 'index.html',\n 'Aliases': {\n 'Quantity': len(domains),\n 'Items': domains\n },\n 'Origins': {\n 'Quantity': 1,\n 'Items': [{\n 'Id': tag_prefix,\n 'DomainName': elb_domain,\n 'CustomOriginConfig': {\n 'HTTPPort': 80,\n 'HTTPSPort': 443,\n 'OriginProtocolPolicy': 'match-viewer',\n }\n }]\n },\n 'DefaultCacheBehavior': {\n 'TargetOriginId': tag_prefix,\n 'TrustedSigners': {\n 'Enabled': False,\n 'Quantity': 0\n },\n 'ViewerProtocolPolicy': 'redirect-to-https',\n },\n #pylint:disable=line-too-long\n #https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PriceClass.html\n 'PriceClass': 'XXX',\n 'Enabled': True,\n 'ViewerCertificate': {\n #https://aws.amazon.com/premiumsupport/knowledge-center/associate-ssl-certificates-cloudfront/\n 'CloudFrontDefaultCertificate': False,\n 'ACMCertificateArn': default_cert_location,\n 'SSLSupportMethod': 'sni-only'\n }\n })\n except botocore.exceptions.ClientError as err:\n raise", "def generate_cluster_stack_name(job):\n return 'cluster-%s----%s' % (job.compute_resource.id, job.id)", "def host(self):\n return self._environ.get('HTTP_HOST', '')", "def bucket_website_url(self) -> str:\n ...", "def bucket_website_domain_name(self) -> str:\n return jsii.get(self, \"bucketWebsiteDomainName\")", "def bucket_website_domain_name(self) -> str:\n return jsii.get(self, \"bucketWebsiteDomainName\")", "def baseurl(request):\n if request.is_secure():\n scheme = 'https://'\n else:\n scheme = 'http://'\n\n return {'BASE_URL': scheme + request.get_host(),}", "def set_normal_environment(self):\n if 'RUSTUP_DIST_SERVER' in os.environ:\n self._download_url = os.environ['RUSTUP_DIST_SERVER']\n else:\n self._download_url = 'https://static.rust-lang.org'", "def set_env_var(self):\n\n list_env_vars = self.config.items('environment_variables')\n for env_var in list_env_vars:\n os.environ[env_var[0].upper()] = env_var[1]", "def production():\n env.settings = 'production'\n env.hosts = ['db.tribapps.com'] \n env.user = 'newsapps'\n env.s3_bucket = 'media.apps.chicagotribune.com'", "def overwrite_environment_variable(self, key, value):\n if value is not None:\n self._printer(\"$env:{0} = \\\"{1}\\\"\".format(key, value))\n else:\n self._printer(\"$env:{0} = \\\"\\\"\".format(key))", "def environment(self, environment):\n\n self._set_field(\"environment\", environment.get_json())", "def elastic_cloud_sso_default_url(self) -> str:\n return pulumi.get(self, \"elastic_cloud_sso_default_url\")", "def get_prefix(self) -> str:\n return self.env_type.value + '_'", "def get_ami_keyname ( app_name ) :\n return app_name + '.ami'", "def test_metadata_cache_uri_set_via_env_vars(monkeypatch, caplog):\n ENV_METADATA_CACHE_URI = environ_names_and_sections[NAME_METADATA_CACHE_URI][0]\n ENV_AQUARIUS_URL = 
deprecated_environ_names[NAME_AQUARIUS_URL][0]\n\n monkeypatch.delenv(ENV_METADATA_CACHE_URI, raising=False)\n monkeypatch.delenv(ENV_AQUARIUS_URL, raising=False)\n config = Config()\n metadata_cache_uri = config.metadata_cache_uri\n assert metadata_cache_uri == \"https://aquarius.marketplace.oceanprotocol.com\"\n\n monkeypatch.setenv(ENV_METADATA_CACHE_URI, \"https://custom-aqua.uri\")\n config = Config()\n assert config.metadata_cache_uri == \"https://custom-aqua.uri\"\n\n monkeypatch.setenv(ENV_AQUARIUS_URL, \"https://another-aqua.url\")\n with pytest.raises(ValueError):\n Config()\n\n monkeypatch.delenv(ENV_METADATA_CACHE_URI)\n config = Config()\n assert config.metadata_cache_uri == \"https://another-aqua.url\"\n assert (\n \"Config: AQUARIUS_URL envvar is deprecated. Use METADATA_CACHE_URI instead.\"\n in caplog.text\n )", "def hostname_for_event(self, clean_server_name, agentConfig):\n uri = urlsplit(clean_server_name)\n if '@' in uri.netloc:\n hostname = uri.netloc.split('@')[1].split(':')[0]\n else:\n hostname = uri.netloc.split(':')[0]\n if hostname == 'localhost':\n hostname = self.hostname\n return hostname", "def _key(\n service=None, # type: Optional[str]\n env=None, # type: Optional[str]\n ):\n # type: (...) -> str\n service = service or \"\"\n env = env or \"\"\n return \"service:\" + service + \",env:\" + env", "def flows_endpoint_envvar_callback(default_value: str) -> str:\n return os.getenv(\"GLOBUS_AUTOMATE_FLOWS_ENDPOINT\", default_value)", "def underlying_url(self):\n return 'http://{}:{}'.format(names.azure_url(self.dns_name), self.port)", "def escape_env_var(varname: str) -> str:\n varletters = list(varname.upper())\n if not varletters[0].isalpha():\n varletters[0] = \"_\"\n for i, c in enumerate(varletters):\n if not c.isalnum() and c != \"_\":\n varletters[i] = \"_\"\n return \"\".join(varletters)", "def set_platform_gs_prefix(self, gs_url):\n self.buildurl_gs_prefix = gs_url # pragma: no cover", "def build_endpoint_prefix(self):\n if not sanity.validate_api_hostname(self.api_host):\n error_message = \"Bad API hostname: %s\" % self.api_host\n raise CloudPassageValidation(error_message)\n prefix = \"https://\" + self.api_host + \":\" + str(self.api_port)\n return prefix", "def _set_from_env(name, context, default):\n if default is _DEFAULT_ARG and name not in os.environ:\n return\n\n context[name] = os.environ.get(name, default)", "def __get_host(self) -> str:\n\t\treturn os.getenv('FLASK_DRIVER_HOST', '0.0.0.0')", "def test_set_name_through_init(self) -> None:\n\n given = self.test_name\n expected = given\n\n helper = EnvironmentVariableHelper(given)\n actual = helper.name\n\n self.assertEqual(expected, actual)" ]
[ "0.64996815", "0.5894681", "0.5874034", "0.5585658", "0.5544347", "0.54832244", "0.5323281", "0.5320591", "0.52773994", "0.52695954", "0.5262594", "0.5224099", "0.5203325", "0.5185465", "0.51792514", "0.51767623", "0.51628804", "0.5161422", "0.51356816", "0.5127793", "0.51247144", "0.51175785", "0.5108756", "0.5074495", "0.5066988", "0.50429916", "0.5025823", "0.5013766", "0.5010329", "0.50011206", "0.49543235", "0.4924638", "0.4919425", "0.49173647", "0.49059397", "0.48965967", "0.48918205", "0.48910215", "0.48794422", "0.48697582", "0.48673168", "0.48396164", "0.48369467", "0.4833765", "0.48304868", "0.48301584", "0.48256335", "0.48229066", "0.48193812", "0.48192146", "0.47954386", "0.47831506", "0.47824094", "0.47722167", "0.4755817", "0.47473243", "0.47416002", "0.47396332", "0.4732736", "0.4732736", "0.47276294", "0.47268978", "0.47204512", "0.4719684", "0.4718805", "0.4718796", "0.47149926", "0.4714877", "0.47141358", "0.46942708", "0.4685873", "0.46776012", "0.46739632", "0.46739626", "0.46738443", "0.4667197", "0.46585295", "0.46575895", "0.46498808", "0.46498808", "0.4647995", "0.4643932", "0.46371308", "0.4634912", "0.46338138", "0.46307054", "0.4619797", "0.46189645", "0.46177316", "0.46145344", "0.4613928", "0.46096072", "0.45984253", "0.45920736", "0.45906729", "0.45898366", "0.4583651", "0.4583298", "0.45828798", "0.45800894" ]
0.52725095
9
Your description for this environment.
def description(self) -> Optional[pulumi.Input[str]]:
    return pulumi.get(self, "description")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def describe(self):\r\n print( self.name + \" is here!\" )\r\n print( self.description )", "def description(self):\n pass", "def description(self):\n pass", "def description():", "def description(self) -> str:\n pass", "def describe(self):\n print(self.description)", "def describe(self):\n print(self.description)", "def help_description():\n pass", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def get_description(self) -> str:\n pass", "def description(self):\n return self.settings['description']", "def Description(self) -> str:", "def Description(self) -> str:", "def _description(self):\n return None", "def get_description(self):\n pass", "def describe(self) -> str:", "def description(self):", "def description(self) -> str:\n raise NotImplementedError", "def description(self) -> str:\n raise NotImplementedError", "def description(self) -> str:\n raise NotImplementedError", "def describe(self):\n return ''", "def description(self) -> str:\r\n raise NotImplementedError", "def description(self) -> str:\r\n raise NotImplementedError", "def description(self) -> str:\r\n raise NotImplementedError", "def description(cls) -> str:\n\n return cls.__doc__ or \"\"", "def define_description(self):\n self._description = 'NODDI-based processing of DWI datasets.'", "def description(self) -> str:\n return 
self.data['description']", "def get_description(self):\r\n return self.__description", "def description(self) -> str:\n return self._description", "def description(self) -> str:\n return self._description", "def description(self) -> str:\n return self._description", "def description(self) -> str:\n return self._description", "def description(self) -> str:\n return self._description", "def description(self) -> str:\n return self._description", "def description(self) -> str:\n return self._description", "def description(self) -> str:\n return self._description", "def description(self):\n return (self.__doc__ or \"\").strip()", "def get_description(self):\n print(\"This Iron door.\")", "def get_description():\n raise NotImplementedError", "def get_description(self):\n return self.__description", "def get_description():\n desc = dict()\n desc[\"cache\"] = 3600\n desc[\"data\"] = True\n desc[\n \"description\"\n ] = \"\"\"This plot is not meant for interactive use, but a backend for\n SPS plots.\n \"\"\"\n desc[\"arguments\"] = [\n dict(\n type=\"text\",\n name=\"pid\",\n default=\"202012300005-KDVN-WWUS83-SPSDVN\",\n label=\"IEM generated up to 35 char product identifier:\",\n ),\n dict(\n type=\"int\",\n default=0,\n name=\"segnum\",\n label=\"Product Segment Number (starts at 0):\",\n ),\n ]\n return desc", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description", "def description(self):\n return self._description" ]
[ "0.7659912", "0.7643785", "0.7643785", "0.75917745", "0.7568413", "0.75415725", "0.75415725", "0.7419379", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.72338927", "0.72166294", "0.7203367", "0.7203367", "0.7194439", "0.7192609", "0.7190733", "0.7167787", "0.70630354", "0.70630354", "0.70630354", "0.70424354", "0.70071614", "0.70071614", "0.70071614", "0.6987163", "0.6966524", "0.6959199", "0.6948282", "0.6916723", "0.6916723", "0.6916723", "0.6916723", "0.6916723", "0.6916723", "0.6916723", "0.6916723", "0.69063497", "0.68894", "0.6887233", "0.68697923", "0.68688613", "0.68648523", "0.68648523", "0.68648523", "0.68648523", "0.68648523", "0.68648523", "0.68648523", "0.68648523", "0.68648523", "0.68648523", "0.68648523", "0.68648523", "0.68648523", "0.68648523", "0.68648523", "0.68648523", "0.68648523", "0.68648523", "0.68648523", "0.68648523", "0.68648523", "0.68648523", "0.68648523", "0.68648523", "0.68648523", "0.68648523", "0.68648523", "0.68648523", "0.68648523", "0.68648523", "0.68648523" ]
0.0
-1
A unique name for the environment.
def environment_name(self) -> Optional[pulumi.Input[str]]:
    return pulumi.get(self, "environment_name")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def name(self):\n return self._env_name", "def name(self):\n return get_env_name(self.tool_name,\n self._python,\n self._requirements,\n self._tagged_env_vars)", "def env_name(self):\n return f\"{self.project_name}-{self.stage}\"", "def unique_name():\n return \"unique-{0}\".format(uuid.uuid4())", "def environment_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"environment_name\")", "def get_name():\n return config.APP_NAME", "def environment_label(self) -> str:\n return self._environment_label", "def env_name(pre_chars='(', post_chars=')'):\n env_path = builtins.__xonsh_env__.get('VIRTUAL_ENV', '')\n if len(env_path) == 0 and xp.ON_ANACONDA:\n env_path = builtins.__xonsh_env__.get('CONDA_DEFAULT_ENV', '')\n env_name = os.path.basename(env_path)\n if env_name:\n return pre_chars + env_name + post_chars", "def generate_unique_name():\n return 'titanic-' + str(get_mac())", "def createname(cls):\n name = config.get(\"pyzombie_filesystem\", \"execbase\")\n name = \"{0}_{1}\".format(name, datetime.utcnow().strftime(\"%Y%jT%H%M%SZ\"))\n if os.path.isdir(Executable.execdirpath(name)):\n #Need to handle the rare case of duplicate resource names---this\n #will happen all the time in testing, but rarely in production.\n index = 0\n altname = \"{0}_{1:03}\".format(name, index)\n while os.path.isdir(Executable.execdirpath(altname)):\n index = index + 1\n altname = \"{0}_{1:03}\".format(name, index)\n name = altname\n return name", "def dir_name(self):\n name = get_env_name(self.tool_name,\n self._python,\n self._requirements,\n self._tagged_env_vars,\n build=True)\n return hashlib.md5(name.encode('utf-8')).hexdigest()", "def create_env_name(name):\n new_name = re.sub(r'''(?<=[a-z])([A-Z])''', '_\\\\1', name)\n new_name = re.sub(r'\\W+', '_', new_name)\n new_name = re.sub(r'_{2,}', '_', new_name)\n return new_name.upper().strip(\"_\")", "def unique_project_name(prefix: str = \"selenium-project\"):\n return f'{prefix}-{uuid.uuid4().hex[:8]}'", "def exp_name() -> str:\n return 'test-' + str(uuid.uuid4())", "def create_r53_name ( base_name, name ) :\n env = get_env_type( base_name )\n if env :\n env = env.lower( )\n if ( env == 'prod' ) :\n return name\n\n return name + '.' 
+ env", "def name(self):\n if not self._name:\n prefix = self.random.choice(['Desktop'] * 4 + ['Laptop'])\n self._name = '{}-{}'.format(prefix, ''.join(\n self.random.choice(string.ascii_uppercase + string.digits) for _ in range(7)))\n return self._name", "def uniqueName(self):\n return \"{0}::{1}\".format(self.name(), str(self.uid))", "def name(self) -> str:\n name = self._config[\"name\"]\n assert isinstance(name, str) # noqa: S101\n return name", "def get_name():\n return __name__", "def scope_name_generator():\n return 'mock_' + str(uuid()).lower()[:16]", "def get_env_name(self):\n if self.options.environment:\n return self.options.environment\n elif os.environ.get(\"JUJU_ENV\"):\n return os.environ['JUJU_ENV']\n\n env_ptr = os.path.join(self.juju_home, \"current-environment\")\n if os.path.exists(env_ptr):\n with open(env_ptr) as fh:\n return fh.read().strip()\n\n with open(self.get_env_conf()) as fh:\n conf = yaml.safe_load(fh.read())\n if not 'default' in conf:\n raise ConfigError(\"No Environment specified\")\n return conf['default']", "def app_name(self): # pylint:disable=function-redefined\n return self._app_name", "def stack_name(self):\n stack_name = getattr(self, '__stack_name', None)\n if (\n self.args.stack_name and\n not stack_name\n ):\n stack_name = self.args.stack_name\n elif not stack_name:\n stack_name = \"nephoria-stack-\" + str(int(time.time()))\n\n setattr(self, '__stack_name', stack_name)\n return stack_name", "def name(self):\n return self._unique_id", "def generate_agent_name():\n\n return '{0}-{1}'.format(\n defaults.CLOUDIFY_AGENT_PREFIX,\n uuid.uuid4())", "def environment_variable_string(self, name):\n return \"$(\" + name + \")\"", "def unique_identifier(self) -> str:\n return pulumi.get(self, \"unique_identifier\")", "def _app_id(self):\n return '{}-{}'.format(self.config['app']['name'],\n self.config['app']['version'])", "def stackname(self):\n return self.BASE_NAME.format(**self.conf)", "def fixture_make_unique_name():\n def _make_unique_name(prefix):\n return f\"{prefix}{time.time_ns()}\"\n return _make_unique_name", "def unique_id(self) -> str:\n return f\"{self._host}_{self._name}_{self._unique_id}\"", "def generate_unique_job_name(self, name='no_name_job'):\n # TODO: Make it more suitable for disk paths. 
(no *, -)\n from base64 import urlsafe_b64encode\n name = os.path.basename(name)\n return \"_\".join([os.path.split(name)[1], urlsafe_b64encode(os.urandom(3))])", "def name(self) -> str:\n return self.config_name or self.host_name or self.dev_id or DEVICE_DEFAULT_NAME", "def secret_name(self) -> str:\n return self._secret_name", "def app_name(self) -> str:\n return self._app_name", "def key_name(self) -> str:\n return pulumi.get(self, \"key_name\")", "def name() -> str:\n pass", "def unique_pipeline_name(self):\n return self._unique_pipeline_name", "def get_name(app):\n from uuid import uuid4 as uuid\n return (f'accelpy_{app[\"application\"][\"product_id\"]}'\n f'_{str(uuid()).replace(\"-\", \"\")[:8]}')", "def unique_id(self):\r\n name_slug = slugify(self._name)\r\n return f\"{name_slug}\"", "def name(self):\n return self.config.get('name') or f\"{self.id.replace('_', ' ').title()}\"", "def name(self):\n return self.config.get('name') or f\"{self.id.replace('_', ' ').title()}\"", "def name(self):\n return _version._NAME # pylint: disable=protected-access", "def app_name(self):\n return self._app_name", "def name(cls):\n\n system = platform.system()\n\n # Apply system map\n if system in NAME_MAP:\n system = NAME_MAP[system]\n\n return system", "def unique_id(self):\n return self.config_entry.entry_id + \"stg\"", "def hub_name(self):\n return self._props[\"persistent_identifiers\"].get(self._hub_name_prop)", "def name(self):\n return self.config[\"name\"]", "def _get_environment(cls):\n return cls.__name__.lower()", "def unique_id(self) -> str:\n return \"_\".join([self._name, \"climate\"])", "def get_identity_name(identity_kind: str = GLOBAL_APPLICATION_CONFIGURATION) -> str:\n identity_name = os.environ.get(identity_kind)\n if identity_name:\n return identity_name\n # TODO: Add discovery here? 
This can probably be inferred.\n # Need to be careful because not all users may have IAM privileges.\n # -kmp 31-Aug-2022\n context = \"\"\n account_number = os.environ.get('ACCOUNT_NUMBER')\n if account_number:\n context = f\" in account {account_number}\"\n raise ValueError(f\"There is no default identity name available for {identity_kind}{context}.\")", "def uid():\n\n # Ambient variables for each operating system\n us = {'Windows': 'USERNAME', 'Linux': 'USER'}\n\n u = us.get(platform.system())\n return os.environ.get(u)", "def _DefaultAppId():\n return os.getenv('APPLICATION_ID', '_')", "def tracing_name(name: Optional[str] = None) -> str:\n if name is None:\n name = settings.SERVICE_NAME\n return f\"{name}.{settings.ENVIRONMENT.lower()}\"", "def unique_id(self) -> str:\n return f\"{self._mac}_tracker\"", "def get_raw_server_name():\n from google.appengine.api import app_identity\n return '%s.%s.appspot.com' % (os.environ[\n 'CURRENT_VERSION_ID'].split('.')[0], app_identity.get_application_id())", "def application_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_name\")", "def name(self):\n return self._config_name", "def name(self):\n return f\"{habitica.DOMAIN}_{self._name}_{self._sensor_name}\"", "def name(self):\n return \"docker_{}\".format(self._var_name.lower())", "def get_prefix(self) -> str:\n return self.env_type.value + '_'", "def name(self):\n return '{} {}'.format(self.client_name, self.variable)", "def getSiteName():\n return os.environ['SITENAME']", "def unique_id(self):\n return f\"bhyve:program:{self._program_id}\"", "def name(self):\n return 'Greenlet-%d' % (self.minimal_ident,)", "def get_daemon_name(cls):\n\n return os.environ[cls.CLOUDIFY_DAEMON_NAME_KEY]", "def name(self):\n if self._name is None:\n return(self.default_name)\n else:\n return(self._name)", "def name(self):\n return self._config.get(CONF_NAME)", "def common_name(self) -> str:\n return pulumi.get(self, \"common_name\")", "def generate_name(self):\n name = self._generate_test_name()\n while self.exists(name):\n name = self._generate_test_name()\n return name", "def generate_workflow_name(self) -> str:\n pass", "def name(self):\n\n return self.manifest[\"name\"]", "def name(self):\n return \"docker_{}_{}\".format(self._name, self._var_name)", "def scope_name():\n return tf.compat.v1.get_variable_scope().name", "def windows_name(self):\n return self._windows_name", "def systematic_name(self):\n\n return self._systematic_name", "def unique_id(self) -> str | None:\n return self._config[CONF_ID]", "def get_conda_env_name():\n env_name = os.popen('echo $CONDA_DEFAULT_ENV').read().strip()\n if env_name == '' or env_name == '$CONDA_DEFAULT_ENV':\n env_name = 'base'\n logging.info('Anaconda environment: ' + env_name)\n return env_name", "def scope_name():\n return tf.get_variable_scope().name", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return 
pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")" ]
[ "0.79486114", "0.75685275", "0.7347741", "0.71612895", "0.7083821", "0.6932168", "0.6866546", "0.6833046", "0.6826362", "0.6653989", "0.66474885", "0.658179", "0.6504934", "0.6459993", "0.64504695", "0.64480555", "0.6429557", "0.6411307", "0.6345184", "0.63397086", "0.6339165", "0.63373244", "0.6329448", "0.6316292", "0.6283438", "0.6276137", "0.62651104", "0.6246931", "0.6246098", "0.6243113", "0.6233861", "0.62178844", "0.6201397", "0.6181658", "0.6180973", "0.6180921", "0.61273086", "0.6121998", "0.6099323", "0.609278", "0.60762846", "0.60762846", "0.6067836", "0.6059612", "0.60575026", "0.6048293", "0.60245144", "0.60178083", "0.6017752", "0.59991586", "0.5995394", "0.5991397", "0.5987283", "0.59839946", "0.59566945", "0.5949623", "0.594911", "0.594911", "0.59423923", "0.59404844", "0.5936355", "0.59363186", "0.5927052", "0.5926956", "0.5921183", "0.5916729", "0.59123844", "0.59062517", "0.5904315", "0.590411", "0.5900752", "0.58977413", "0.5886261", "0.58815414", "0.5879888", "0.5877846", "0.5876726", "0.587451", "0.5870073", "0.5868866", "0.5862747", "0.5862747", "0.5862747", "0.5862747", "0.5862747", "0.5862747", "0.5862747", "0.5862747", "0.5862747", "0.5862747", "0.5862747", "0.5862747", "0.5862747", "0.5862747", "0.5862747", "0.5862747", "0.5862747", "0.5862747", "0.5862747", "0.5862747" ]
0.69573826
5
The Amazon Resource Name (ARN) of an existing IAM role to be used as the environment's operations role.
def operations_role(self) -> Optional[pulumi.Input[str]]:
    return pulumi.get(self, "operations_role")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def role_arn(self) -> str:\n return pulumi.get(self, \"role_arn\")", "def iam_role_arn(self) -> str:\n return pulumi.get(self, \"iam_role_arn\")", "def role_arn(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"role_arn\")", "def role_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"role_arn\")", "def execution_role_arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"execution_role_arn\")", "def execution_role_arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"execution_role_arn\")", "def execution_role_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"execution_role_arn\")", "def execution_role_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"execution_role_arn\")", "def role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role_arn\")", "def role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role_arn\")", "def role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role_arn\")", "def role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role_arn\")", "def role_arn(self) -> Optional[str]:\n return pulumi.get(self, \"role_arn\")", "def role_arn(self) -> Optional[str]:\n return pulumi.get(self, \"role_arn\")", "def execution_role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"execution_role_arn\")", "def operations_role(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"operations_role\")", "def role(self) -> str:\n\n assert self.data is not None\n return self.data[\"role\"][\"name\"]", "def role(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"role\")", "def role(self) -> str:\n return pulumi.get(self, \"role\")", "def alarm_role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"alarm_role_arn\")", "def get_redshift_iam_role_arn(iam, iam_role_name):\n return iam.get_role(RoleName=iam_role_name)['Role']['Arn']", "def account_role_arn(self, role, partition='aws'):\n if not role or role.startswith(\"arn:aws\"):\n return role\n if not role.startswith(\"role/\"):\n role = \"role/\" + role\n return \"arn:{0}:iam::{1}:{2}\".format(partition, self.account_id, role)", "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "def invocation_role(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"invocation_role\")", "def data_api_role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"data_api_role_arn\")", "def invoke_arn(self) -> str:\n return pulumi.get(self, \"invoke_arn\")", "def resource_arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_arn\")", "def role(self) -> aws_cdk.aws_iam.IRole:\n return self._values.get('role')", "def role_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"role_id\")", "def service_role(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_role\")", "def role_arn_lookup(session, role_name):\n if session is None:\n return None\n\n client = session.client('iam')\n response = client.get_role(RoleName=role_name)\n if response is None:\n return None\n else:\n return response['Role']['Arn']", "def role(self):\n\n return self._role", "def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:\n return self._values.get(\"role\")", "def role(self):\n return self._role", "def role(self):\n 
return self._role", "def role(self):\n return self._role", "def create_arn_role(iam):\n print(\"Attaching policy to IAM role\")\n iam.attach_role_policy(RoleName=DWH_IAM_ROLE_NAME,\n PolicyArn=\"arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess\")['ResponseMetadata']['HTTPStatusCode']\n roleArn = iam.get_role(RoleName=DWH_IAM_ROLE_NAME)['Role']['Arn']\n #print(\"ARN role:\", roleArn)\n return roleArn", "def target_role(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"target_role\")", "def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:\n return self._values.get('role')", "def invocation_role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"invocation_role\")", "def invocation_role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"invocation_role\")", "def getRole(self):\n return _libsbml.ReferenceGlyph_getRole(self)", "def get_role(self):\n return self.role", "def action_role(self) -> aws_cdk.aws_iam.IRole:\n return self._values.get(\"action_role\")", "def resource_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_arn\")", "def resource_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_arn\")", "def resource_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_arn\")", "def resource_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_arn\")", "def resource_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_arn\")", "def resource_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_arn\")", "def resource_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_arn\")", "def resource_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_arn\")", "def getRoleString(self):\n return _libsbml.SpeciesReferenceGlyph_getRoleString(self)", "def role(self):\r\n roles = {\r\n 'student': u'Student',\r\n 'staff': u'Administrator',\r\n 'instructor': u'Instructor',\r\n }\r\n return roles.get(self.system.get_user_role(), u'Student')", "def user_role(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"user_role\")", "def get_name(self):\n return '-'.join(self._name_parts +\n [self.role.name, self.scenario.name])", "def role_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"role_id\")", "def service_role(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"service_role\")", "def resource_arn(self) -> Optional[str]:\n return pulumi.get(self, \"resource_arn\")", "def target_role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"target_role\")", "def target_role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"target_role\")", "def management_account_arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"management_account_arn\")", "def _generateRoleName(self, obj, **args):\n # Subclasses must override this.\n return []", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n 
return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def getRole(self):\n return _libsbml.SpeciesReferenceGlyph_getRole(self)", "def role_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role_id\")", "def user_role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_role\")", "def user_role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_role\")", "def _get_role(self):\n return self.__role", "def delivery_channel_assume_role_arn(self) -> str:\n return pulumi.get(self, \"delivery_channel_assume_role_arn\")", "def delivery_channel_assume_role_arn(self) -> str:\n return pulumi.get(self, \"delivery_channel_assume_role_arn\")", "def delivery_channel_assume_role_arn(self) -> str:\n return pulumi.get(self, \"delivery_channel_assume_role_arn\")", "def cloud_formation_execution_role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:\n return self._values.get(\"cloud_formation_execution_role\")", "def __repr__(self):\n return '<Role %r>' % self.name" ]
[ "0.7612605", "0.75487196", "0.7544673", "0.7489435", "0.74707127", "0.74707127", "0.7373255", "0.7373255", "0.734374", "0.734374", "0.734374", "0.734374", "0.72820526", "0.72820526", "0.7252721", "0.71177185", "0.70236653", "0.69610494", "0.6929272", "0.6908325", "0.69070154", "0.675902", "0.64789414", "0.64789414", "0.64789414", "0.64569306", "0.6324452", "0.63126695", "0.62839365", "0.62788206", "0.62187576", "0.61906755", "0.6186575", "0.61759996", "0.61687213", "0.6168652", "0.6168652", "0.6168652", "0.6160947", "0.61550874", "0.61328584", "0.6118572", "0.6118572", "0.61026704", "0.6096583", "0.60411924", "0.60100806", "0.5989643", "0.5989643", "0.5989643", "0.5989643", "0.5989643", "0.5989643", "0.5989643", "0.5967992", "0.59406656", "0.5929459", "0.5920024", "0.59032047", "0.5893856", "0.58760834", "0.58475745", "0.58475745", "0.58246005", "0.5799816", "0.572815", "0.572815", "0.572815", "0.572815", "0.572815", "0.572815", "0.572815", "0.572815", "0.572815", "0.572815", "0.572815", "0.572815", "0.572815", "0.572815", "0.572815", "0.572815", "0.572815", "0.572815", "0.572815", "0.572815", "0.572815", "0.572815", "0.572815", "0.572815", "0.572815", "0.5711425", "0.56995225", "0.56788856", "0.56788856", "0.56510264", "0.56323785", "0.56323785", "0.56323785", "0.5630336", "0.56277376" ]
0.6835086
21
Key-value pairs defining configuration options for this environment, such as the instance type.
def option_settings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentOptionSettingArgs']]]]:
    return pulumi.get(self, "option_settings")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def define_options(self):\n return {\n 'basename': OptionDef(required=True, default_value='keycloak', allowed_types=[str]),\n 'namespace': OptionDef(required=True, default_value='default', allowed_types=[str]),\n 'config': {\n 'service_port': OptionDef(required=True, default_value=8080, allowed_types=[int]),\n 'realm_import': OptionDef(format=OptionDefFormat.KDATA_VOLUME, allowed_types=[str, bytes, KData_Secret]),\n 'proxy_address_forwarding': OptionDef(format=OptionDefFormat.KDATA_ENV,\n allowed_types=[bool, *KDataHelper_Env.allowed_kdata()]),\n 'frontend_url': OptionDef(allowed_types=[str]),\n 'admin': {\n 'user': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'password': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, KData_Secret]),\n },\n 'db': {\n 'vendor': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'addr': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'port': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[int, *KDataHelper_Env.allowed_kdata()]),\n 'database': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'schema': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'user': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'password': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, KData_Secret]),\n },\n },\n 'container': {\n 'keycloak': OptionDef(required=True, default_value='quay.io/keycloak/keycloak:11.0.2', allowed_types=[str]),\n },\n 'kubernetes': {\n 'resources': {\n 'deployment': OptionDef(allowed_types=[Mapping]),\n }\n },\n }", "def options(self) -> Mapping[str, str]:\n return pulumi.get(self, \"options\")", "def get_config(self):\n return {\"name\": self.name, \"tunable\": self.tunable}", "def get_options(cls):\n return {\n \"name\": str,\n ConfigOption(\"install_files\", default=None): Or(None, list),\n ConfigOption(\"timeout\", default=300): int,\n ConfigOption(\"log_regexps\", default=None): Or(None, list),\n ConfigOption(\"stdout_regexps\", default=None): Or(None, list),\n ConfigOption(\"stderr_regexps\", default=None): Or(None, list),\n ConfigOption(\"file_logger\", default=None): Or(None, str),\n ConfigOption(\"async_start\", default=False): bool,\n ConfigOption(\"report_errors_from_logs\", default=False): bool,\n ConfigOption(\"error_logs_max_lines\", default=10): int,\n ConfigOption(\"path_cleanup\", default=True): bool,\n ConfigOption(\"pre_start\", default=None): validate_func(\"driver\"),\n ConfigOption(\"post_start\", default=None): validate_func(\"driver\"),\n ConfigOption(\"pre_stop\", default=None): validate_func(\"driver\"),\n ConfigOption(\"post_stop\", default=None): validate_func(\"driver\"),\n }", "def default_config(cls) -> dict:\n return {\n \"observation\": {\n \"type\": \"Kinematics\"\n },\n \"action\": {\n \"type\": \"DiscreteMetaAction\"\n },\n \"simulation_frequency\": 15, # [Hz]\n \"policy_frequency\": 1, # [Hz]\n \"other_vehicles_type\": \"highway_env.vehicle.behavior.IDMVehicle\",\n \"screen_width\": 600, # [px]\n \"screen_height\": 150, # [px]\n \"centering_position\": [0.3, 0.5],\n \"scaling\": 5.5,\n \"show_trajectories\": False,\n \"render_agent\": True,\n \"offscreen_rendering\": os.environ.get(\"OFFSCREEN_RENDERING\", \"0\") == \"1\",\n \"manual_control\": False,\n 
\"real_time_rendering\": False\n }", "def _options(self):\n return", "def required_config_keys(self):\n return [\"options\", \"label_columns\", \"env\"]", "def define_options(self) -> Optional[Any]:\n return {\n 'basename': OptionDef(required=True, default_value='promtail', allowed_types=[str]),\n 'namespace': OptionDef(required=True, default_value='monitoring', allowed_types=[str]),\n 'config': {\n 'prometheus_annotation': OptionDef(required=True, default_value=False, allowed_types=[bool]),\n 'promtail_config': OptionDef(allowed_types=[str, ConfigFile]),\n 'loki_url': OptionDef(allowed_types=[str]),\n 'authorization': {\n 'serviceaccount_create': OptionDef(required=True, default_value=True, allowed_types=[bool]),\n 'serviceaccount_use': OptionDef(allowed_types=[str]),\n 'roles_create': OptionDef(required=True, default_value=True, allowed_types=[bool]),\n 'roles_bind': OptionDef(required=True, default_value=True, allowed_types=[bool]),\n },\n },\n 'container': {\n 'promtail': OptionDef(required=True, default_value='grafana/promtail:2.0.0', allowed_types=[str]),\n },\n 'kubernetes': {\n 'resources': {\n 'daemonset': OptionDef(allowed_types=[Mapping]),\n }\n },\n }", "def configuration_keys(self):\n return ['dispname', 'decker', 'binning']", "def configuration_keys(self):\n return ['dispname', 'decker', 'binning']", "def configuration_keys(self):\n return ['dispname', 'decker', 'binning']", "def config_pairs(self):\n return [(\"templater\", self.name), (\"dbt\", self.dbt_version)]", "def get_config(self):\n config = {'name': self.name, 'trainable': self.trainable}\n if hasattr(self, '_batch_input_shape'):\n config['batch_input_shape'] = self._batch_input_shape\n if hasattr(self, 'dtype'):\n config['dtype'] = self.dtype\n return config", "def config(self) -> Dict[str, Any]:", "def default_options(cls) -> Dict:\n return {}", "def options(self):\n options = {\n o.name: getattr(self, o.name)\n for o in _OPTIONS\n }\n return options", "def get_config(self):\n config = {\n 'window_length': self.window_length,\n 'ignore_episode_boundaries': self.ignore_episode_boundaries,\n }\n return config", "def get_config(self):\n return {}", "def get_config(self):\n return {'name': self.name, 'dtype': self.dtype}", "def instance_configuration(self) -> Optional[pulumi.Input['ServiceInstanceConfigurationArgs']]:\n return pulumi.get(self, \"instance_configuration\")", "def instance_configuration(self) -> Optional[pulumi.Input['ServiceInstanceConfigurationArgs']]:\n return pulumi.get(self, \"instance_configuration\")", "def get_configuration_template(self):\n return {'EXAMPLE_KEY_1': \"Example value\",\n 'EXAMPLE_KEY_2': [\"Example\", \"Value\"]\n }", "def get_config(self):\n\n # these are all that is needed to rebuild this class\n config = dict(hidden_size=self.hidden_size,\n word_embedding=self.word_embedding,\n detection_embedding=self.detection_embedding,\n mode=self.mode,\n decoder_pos_emb=self.decoder_pos_emb,\n ** self.kwargs)\n\n base_config = super(RegionFeature, self).get_config()\n return dict(list(base_config.items()) +\n list(config.items()))", "def get_configuration_template(self):\n return {'EXAMPLE_KEY_1': \"Example value\",\n 'EXAMPLE_KEY_2': [\"Example\", \"Value\"]\n }", "def get_config(self) -> Dict[str, Any]:\n return {\n 'num_classes': self.num_classes,\n 'name': self.name,\n 'dtype': self.dtype,\n 'sparse_y_true': self.sparse_y_true,\n 'sparse_y_pred': self.sparse_y_pred,\n 'axis': self.axis,\n }", "def options(self):\n\t\treturn self.config_parser.options(self.section_name)", "def 
get_config(self) -> dict:\n out = {}\n for name in self.CONFIG_DEFAULTS:\n out[name] = self.__getattribute__(name)\n return out", "def default_options(cls) -> Dict:\n options = super().default_options()\n # scaling factor for temperature adaptation\n options['eta'] = 100\n # controls the adaptation degeneration velocity of the temperature\n # adaption.\n options['nu'] = 1e3\n\n return options", "def configuration(self):\n # type: () -> Dict[str, str]\n return {\n 'source': self.source,\n 'location': self.location,\n 'uri': self.uri,\n 'options': self.options,\n 'cache_dir': self.cache_dir\n }", "def settings(self) -> Dict[str, Any]:\n return {}", "def requested_config_vals():\n return {} # no extra values needed", "def _opt_config(self):\n return self._opt_method.config", "def _get_options(self) -> Dict[str, Any]:\n # TODO: handle holidays as well\n return {\n \"growth\": self.growth,\n \"changepoints\": self.changepoints and list(self.changepoints.astype('str')),\n \"n_changepoints\": self.n_changepoints,\n \"changepoint_range\": self.changepoint_range,\n \"changepoint_prior_scale\": self.changepoint_prior_scale,\n \"mcmc_samples\": self.mcmc_samples,\n \"interval_width\": self.interval_width,\n \"uncertainty_samples\": self.uncertainty_samples,\n \"yearly_seasonality\": self.yearly_seasonality,\n \"weekly_seasonality\": self.weekly_seasonality,\n \"daily_seasonality\": self.daily_seasonality,\n \"seasonality_mode\": self.seasonality_mode,\n \"seasonality_prior_scale\": self.seasonality_prior_scale,\n\n \"seasonalities\": self.seasonalities,\n \"extra_regressors\": self.extra_regressors\n }", "def options(self):\n return self.__options", "def _config(self):\r\n return (\r\n self.destructive,\r\n self.output_type,\r\n self.seed,\r\n )", "def options(self):\n return self._options", "def options(self):\n return self._options", "def options(self):\n return self._options", "def options(self):\n return self._options", "def options(self):\n return self._options", "def options(self):\r\n return self._options", "def _set_instance_config(self):\n\t\t\n\t\tif \"PARAMETERS_NAME\" in self.config.keys():\n\t\t\tlogger.info(\"You specified your own PARAMETERS_NAME, I will use it.\")\n\t\telse:\n\t\t\tself.config[\"PARAMETERS_NAME\"] = self._get_params_filepath()\n\t\t\n\t\tif \"FILTER_NAME\" in self.config.keys():\n\t\t\tlogger.info(\"You specified your own FILTER_NAME, I will use it.\")\n\t\telse:\n\t\t\tself.config[\"FILTER_NAME\"] = self._get_conv_filepath()\n\t\t\n\t\t\n\t\tif \"CATALOG_NAME\" in self.config.keys():\n\t\t\tlogger.warning(\"You specified your own CATALOG_NAME, but I will *NOT* use it !\")\n\t\t\tdel self.config[\"CATALOG_NAME\"]\n\n\t\tif \"PSF_NAME\" in self.config.keys():\n\t\t\tlogger.info(\"You specified your own PSF_NAME, I will use it.\")\n\t\telse:\n\t\t\tself.config[\"PSF_NAME\"] = self._get_psf_filepath()", "def settings(self):\n return {}", "def get_external_opts_configs(cls):\n return [\n ExternalOptConfig(\n name=\"auth_uri\",\n module_str=\"keystoneclient.middleware.auth_token\",\n group=\"keystone_authtoken\"),\n ExternalOptConfig(\n name=\"admin_user\",\n module_str=\"keystoneclient.middleware.auth_token\",\n group=\"keystone_authtoken\"),\n ExternalOptConfig(\n name=\"admin_password\",\n module_str=\"keystoneclient.middleware.auth_token\",\n group=\"keystone_authtoken\"),\n ExternalOptConfig(\n name=\"admin_tenant_name\",\n module_str=\"keystoneclient.middleware.auth_token\",\n group=\"keystone_authtoken\"),\n ]", "def test_options_structure(self):\r\n deploy = 
self.wsgiDeploy()\r\n expected_keys = self.DEFAULTS.keys()\r\n actual_keys = deploy.options.keys()\r\n self.assertListEqual(expected_keys, actual_keys)", "def getServerOptions(self):\n pass", "def config(self):\n return {}", "def setting(self):\n return {\n \"num_examples\": self.num_examples,\n \"dim_data\": self.dim_data,\n \"dim_target\": self.dim_target,\n \"info\": self.info,\n }", "def _options(self):\r\n xmi_file = self.tb_xmi_file_name.GetValue()\r\n topic = self.tb_pragma.GetValue()\r\n package = self.tb_package.GetValue()\r\n header = self.tb_file_header.GetValue()\r\n target_folder = self.tb_target_folder.GetValue()\r\n encoding = self.tb_encoding.GetValue()\r\n \r\n return {\"topic\" : topic, \r\n \"package\" : package, \r\n \"header\" : header, \r\n \"target_folder\" : target_folder,\r\n \"encoding\" : encoding,\r\n \"xmi_file\" : xmi_file}", "def options(self):\n return list(self._moptions.keys())", "def default_configs(cls):\n config = super().default_configs()\n config.update(\n {\n \"entry_type\": \"ft.onto.base_ontology.Document\",\n \"model_name\": \"ktrapeznikov/biobert_v1.1_pubmed_squad_v2\",\n \"question\": \"Where do I live\",\n \"max_answer_len\": 15,\n \"cuda_devices\": -1,\n \"handle_impossible_answer\": False,\n }\n )\n return config", "def config(self):\n raise NotImplementedError", "def options(self, parser, env):\n pass", "def _default_config(cls):\n return dict()", "def get_full_configuration(self) -> dict:\n\n return {\n input_instance.key: input_instance.argument_value\n for input_instance in self.all_input_instances\n }", "def _config_options(self):\n self._config_sortable(self._sortable)\n self._config_drag_cols(self._drag_cols)", "def __init__(self):\n super(t_var_size_Options, self).__init__()\n self.options = {\n t_var_size_Options.BOARD_ID : {'value' : '', 'name' : 'board_id' },\n t_var_size_Options.CURRENT_STATE : {'value' : '', 'name' : 'state' },\n t_var_size_Options.PATTERN_WAVE : {'value' : '', 'name' : 'pat_wav' }\n }", "def readOptions(self):\n get = command_line.CommandLineParser().get_option\n if get('nosplash')!=None:\n self.temp_configuration.showSplash = bool(get('nosplash'))\n if get('debugsignals')!=None:\n self.temp_configuration.debugSignals = bool(get('debugsignals'))\n if get('dotVistrails')!=None:\n self.temp_configuration.dotVistrails = get('dotVistrails')\n #in theory this should never happen because core.configuration.default()\n #should have done this already\n #if not self.configuration.check('dotVistrails'):\n # self.configuration.dotVistrails = system.default_dot_vistrails()\n # self.temp_configuration.dotVistrails = system.default_dot_vistrails()\n if get('multiheads')!=None:\n self.temp_configuration.multiHeads = bool(get('multiheads'))\n if get('maximized')!=None:\n self.temp_configuration.maximizeWindows = bool(get('maximized'))\n if get('movies')!=None:\n self.temp_configuration.showMovies = bool(get('movies'))\n if get('cache')!=None:\n self.temp_configuration.useCache = bool(get('cache'))\n if get('verbose')!=None:\n self.temp_configuration.verbosenessLevel = get('verbose')\n if get('noninteractive')!=None:\n self.temp_configuration.interactiveMode = \\\n not bool(get('noninteractive'))\n if get('workflowinfo') != None:\n self.temp_configuration.workflowInfo = str(get('workflowinfo'))\n if get('dumpcells') != None:\n self.temp_configuration.spreadsheetDumpCells = get('dumpcells')\n if get('pdf') != None:\n self.temp_configuration.spreadsheetDumpPDF = get('pdf')\n if get('workflowgraph') != None:\n 
self.temp_configuration.workflowGraph = str(get('workflowgraph'))\n if get('evolutiongraph') != None:\n self.temp_configuration.evolutionGraph = str(get('evolutiongraph'))\n if get('executeworkflows') != None:\n self.temp_configuration.executeWorkflows = \\\n bool(get('executeworkflows'))\n if get('showspreadsheetonly') != None:\n self.temp_configuration.showSpreadsheetOnly = \\\n bool(get('showspreadsheetonly'))\n # asking to show only the spreadsheet will force the workflows to\n # be executed\n if get('reviewmode') != None:\n self.temp_configuration.reviewMode = bool(get('reviewmode'))\n\n if self.temp_configuration.showSpreadsheetOnly and not self.temp_configuration.reviewMode:\n self.temp_configuration.executeWorkflows = True\n \n self.temp_db_options = InstanceObject(host=get('host'),\n port=get('port'),\n db=get('db'),\n user=get('user'),\n parameters=get('parameters')\n )\n if get('nologger')!=None:\n self.temp_configuration.nologger = bool(get('nologger'))\n if get('quickstart') != None:\n self.temp_configuration.staticRegistry = str(get('quickstart'))\n if get('detachHistoryView')!= None:\n self.temp_configuration.detachHistoryView = bool(get('detachHistoryView'))\n self.input = command_line.CommandLineParser().positional_arguments()", "def instance_configuration(self) -> pulumi.Output['outputs.ServiceInstanceConfiguration']:\n return pulumi.get(self, \"instance_configuration\")", "def configuration(self) -> Dict[str, Any]:\n return {self.__class__.__qualname__: self._param_names}", "def config_init(self):\n\n game_opts = [\n\n # Execution Options\n ('debug',False), # Toggle Debug Messaging\n ('log_path',False), # Turn on logging (w/path)\n ('log_lvl',logging.DEBUG), # Set log level\n\n # World Generation Options\n ('flex_limit',3) # Sets the maximum variance\n\n ]\n\n # Attempts to pull each value from the configuration\n # if not in config, the default value defined above\n # is set instead\n for opt in game_opts:\n try:\n setattr(self,opt[0],self.conf.conf_dict[opt[0]])\n except:\n setattr(self,opt[0],opt[1])\n continue", "def config(self): # type: () -> t.Dict[str, t.Any]\n return self.inspection['Config']", "def get_engine_conf():\n result = {}\n for k,v in pylons.config.iteritems():\n if not k.startswith('sqlalchemy.'):\n continue\n k = k[11:]\n if k in BOOL_OPTIONS:\n result[k] = asbool(v)\n elif k in INT_OPTIONS:\n try:\n result[k] = int(v)\n except ValueError:\n reason = 'config sqlalchemy.%s is not an integer: %s'\n raise ValueError(reason % (k,v))\n else:\n result[k] = v\n return result", "def get_options(self) -> dict:\n assert self.task\n task_options = {\n **self.task.get_task_options(),\n **self.expr.task_expr_options,\n **self.task_options,\n }\n return task_options", "def get_options(cls, mode):\n return dict(\n (key, properties[mode])\n for key, properties in cls.__register.items()\n if mode in properties\n )", "def process_config(self):\n driver_options = self.config['service']['options']\n process_config = {\n 'assembler_config': {\n 'driver_options': driver_options,\n 'teststep_config': self.teststep_config,\n 'testcase_config': self.config['reader_settings']['test_case']['keys'],\n },\n 'assembly_config': self.config['assembly_settings'],\n }\n return process_config", "def default_configs(cls):\n config = super().default_configs()\n config.update(\n {\n \"entry_type\": None,\n \"attribute\": None,\n \"index_annotation\": None,\n }\n )\n return config", "def config(self):\n return {\"input_dims\": self.dims, \"output_dims\": self.output_dims, \"mapping\": 
self.mapping}", "def config(self) -> Dict[str, Any]:\r\n attr_conf = {attr: getattr(self._qda, attr, None) for attr in self.attributes}\r\n return {\"params\": self._qda.get_params(), \"attributes\": attr_conf}", "def as_dict(self) -> dict:\n return self._config", "def _create_options(self):\n self._OPTIONS = {}", "def get_config(self):\n config = self._kwargs.copy()\n config.update({\n 'metric': self.__class__.__name__,\n 'name': self.name,\n 'output_names': self.output_names,\n 'label_names': self.label_names})\n return config", "def beaker_session_options(self):\n\n session_data_dir = os.path.join(self.APP_DIR, self.SESSION_DIR)\n\n # TODO: Options which should be made into PyWy application options\n options = dict(type='file',\n data_dir=session_data_dir,\n auto=True)\n\n # Standard options\n options.update(invalidate_corrupt=True, timeout=None,\n secret=None, log_file=None,)\n\n return options", "def configuration():", "def create_options(self):\n return []", "def config(self):\n annotations = IAnnotations(self.context)\n return annotations.get(CONFIGURATION_KEY, {})", "def config(self) -> dict:\n return self._configs", "def config(self) -> dict:\n return self._config", "def set_env_config(self):\n self.env_config = {\n # ===== STANDARD ARGUMENTS ======\n \"n_agents\": 4, # Number of non-planner agents\n \"world_size\": [15, 15], # [Height, Width] of the env world\n \"episode_length\": 1000, # Number of time-steps per episode\n # In multi-action-mode, the policy selects an action for each action\n # subspace (defined in component code)\n # Otherwise, the policy selects only 1 action\n \"multi_action_mode_agents\": False,\n \"multi_action_mode_planner\": True,\n # When flattening observations, concatenate scalar & vector observations\n # before output\n # Otherwise, return observations with minimal processing\n \"flatten_observations\": False,\n # When Flattening masks, concatenate each action subspace mask\n # into a single array\n # Note: flatten_masks = True is recommended for masking action logits\n \"flatten_masks\": True,\n # ===== COMPONENTS =====\n # Which components to use\n \"components\": [\n # (1) Building houses\n {\"Build\": {}},\n # (2) Trading collectible resources\n {\"ContinuousDoubleAuction\": {\"max_num_orders\": 5}},\n # (3) Movement and resource collection\n {\"Gather\": {}},\n ],\n # ===== SCENARIO =====\n # Which scenario class to use\n \"scenario_name\": \"uniform/simple_wood_and_stone\",\n # (optional) kwargs of the chosen scenario class\n \"starting_agent_coin\": 10,\n \"starting_stone_coverage\": 0.10,\n \"starting_wood_coverage\": 0.10,\n }\n\n # Create an environment instance from the config\n self.env = foundation.make_env_instance(**self.env_config)", "def ssh_config(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n print(utils.config_ssh_string(self.config_ssh))", "def get_config(self):\n config = {\n 'multichannel': self._multichannel,\n 'complex_part': self._complex_part\n }\n base_config = super().get_config()\n return {**base_config, **config}", "def declare_final_options(self):\n return {}", "def configuration_keys(self):\n return ['filter1', 'echangle', 'xdangle']", "def get_options(self):\n return []", "def config(self) -> pulumi.Input['ConfigArgs']:\n return pulumi.get(self, \"config\")", "def __init__(self) -> None:\n self.config: dict[str, str | int] = {}", "def getConfiguration(self):\n raise NotImplementedError", "def default_configs(cls):\n config = super().default_configs()\n 
config.update({\"model\": \"openie\"})\n return config", "def expected_instance_datastore_configs(instance_id):\n instance = instance_info.dbaas.instances.get(instance_id)\n datastore_type = instance.datastore['type']\n datastore_test_configs = CONFIG.get(datastore_type, {})\n return datastore_test_configs.get(\"configurations\", {})", "def get_config(self):\n config = super(Sc2Policy, self).get_config()\n config['eps'] = self.eps\n config['testing'] = self.testing\n return config", "def data_dict(self) -> dict:\n return self.design.renderers.gds.options", "def _store_options(self):\n logger.debug(\"Storing general ReplicaExchange options...\")\n self._reporter.write_dict('options', self.options)", "def buildersConf() :\n return dict(_buildersConf)", "def config(self):\n pass", "def config(self):\n pass", "def options_set(self):\n\n global OPTIONS\n OPTIONS.append(config.ENABLE(self.threaded))\n OPTIONS.append(config.ENABLE(self.datasaver))\n OPTIONS.append(self.language)", "async def config_options(self, ctx):\n embeds = []\n for names in zip_longest(*(iter(sorted(self.bot.config.public_keys)),) * 15):\n description = \"\\n\".join(\n f\"`{name}`\" for name in takewhile(lambda x: x is not None, names)\n )\n embed = Embed(\n title=\"Available configuration keys:\",\n color=self.bot.main_color,\n description=description,\n )\n embeds.append(embed)\n\n session = EmbedPaginatorSession(ctx, *embeds)\n await session.run()", "def config(self):\n return self[CONFIG_KEY]", "def get_global_config(self, **kwargs):\n return {}", "def _generate_options(self, **kwargs: Any) -> dict:\n raise NotImplementedError", "def add_options(_config):\n settings = [\n [\"cache_worker\", bool, lambda x: x in [True, False], False, False],\n [\n \"kube_deployment\",\n str,\n lambda x: x in [\"pod\", \"container\", \"file\", \"call\"],\n False,\n \"pod\",\n ],\n [\n \"kube_version\",\n str,\n lambda _: [\"v1.27.0\", \"v1.26.0\", \"v1.25.0\", \"v1.24.0\", \"v1.23.0\"],\n False,\n \"v1.27.0\",\n ],\n ]\n return settings" ]
[ "0.6606235", "0.6575482", "0.65406615", "0.653029", "0.65184087", "0.6499363", "0.64822006", "0.6451657", "0.63914824", "0.63914824", "0.63914824", "0.6329847", "0.6306304", "0.6300337", "0.62472105", "0.6245023", "0.62443054", "0.6222554", "0.6206321", "0.6206157", "0.6206157", "0.6187218", "0.61868906", "0.6183911", "0.6165328", "0.6145835", "0.6114041", "0.61090696", "0.60777813", "0.6063271", "0.605843", "0.60429835", "0.60301036", "0.60097593", "0.6009214", "0.59978133", "0.59978133", "0.59978133", "0.59978133", "0.59978133", "0.5982042", "0.5960859", "0.59561384", "0.59556913", "0.5944024", "0.5932322", "0.592373", "0.59197485", "0.59195566", "0.5916001", "0.59116334", "0.591076", "0.59031004", "0.5898619", "0.5883547", "0.58732873", "0.58694816", "0.58542246", "0.585274", "0.582954", "0.5813403", "0.5768291", "0.57679164", "0.5765074", "0.5764877", "0.57609624", "0.5753653", "0.5752422", "0.5749056", "0.57455915", "0.57443553", "0.57330865", "0.57121056", "0.5708459", "0.5702759", "0.5695993", "0.569536", "0.56952685", "0.56923103", "0.5676661", "0.56709754", "0.5669488", "0.5665913", "0.56603086", "0.56572115", "0.56513953", "0.5651036", "0.5650666", "0.5643954", "0.5638649", "0.5625597", "0.5623037", "0.56158745", "0.5614566", "0.5614566", "0.561331", "0.5611718", "0.5606954", "0.56064445", "0.5606073", "0.56058997" ]
0.0
-1
The Amazon Resource Name (ARN) of the custom platform to use with the environment.
def platform_arn(self) -> Optional[pulumi.Input[str]]:
    return pulumi.get(self, "platform_arn")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def platform_arn(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"platform_arn\")", "def platform(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"platform\")", "def platform():\n return \"micaz\"", "def PlatformName():\n if override_platform_name:\n return override_platform_name\n if IsWindows():\n return 'win32'\n if IsLinux():\n return 'linux'\n if IsMac():\n return 'mac'\n raise NotImplementedError('Unknown platform \"%s\".' % sys.platform)", "def product(self):\n return self.appName", "def name(cls):\n\n system = platform.system()\n\n # Apply system map\n if system in NAME_MAP:\n system = NAME_MAP[system]\n\n return system", "def GetOSName():\n return Config.osName_", "def environment_label(self) -> str:\n return self._environment_label", "def platform_config_filename(region, account_prefix, prod):\n return 'infra/platform-config/%s/%s/%s.json' % (\n account_prefix, \"prod\" if prod else \"dev\", region\n )", "def GetPlatform(self):\n arch = \"None\"\n # check architecture name\n if \"CMTCONFIG\" in os.environ:\n arch = os.environ[\"CMTCONFIG\"]\n elif \"SCRAM_ARCH\" in os.environ:\n arch = os.environ[\"SCRAM_ARCH\"]\n return arch", "def brand(self):\n return \"Nest Labs\"", "def name(self) -> str:\n return f\"{self.platform_name} {self._sensor_name}\"", "def platform(self):\n return self.random.choice([\n 'Laptop', \n 'Desktop', \n 'Workstation', \n 'Server', \n 'Virtual Machine', \n 'Container', \n 'Micro-Service', \n 'Droplet', \n 'SaaS'\n ])", "def os_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"os_name\")", "def app_image_config_arn(self) -> Optional[str]:\n return pulumi.get(self, \"app_image_config_arn\")", "def application_arn(self) -> Optional[str]:\n return pulumi.get(self, \"application_arn\")", "def platform_num(self) -> str:\n return pulumi.get(self, \"platform_num\")", "def architecture(self) -> str:\n return pulumi.get(self, \"architecture\")", "def architecture(self) -> str:\n return pulumi.get(self, \"architecture\")", "def application_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_name\")", "def platform(self):\n # type: () -> string_types\n return self._platform", "def architecture_name(self):\n return get_architecture_name(self.architecture)", "def get_os() -> str:\n system = platform.system().lower()\n\n if system == \"linux\":\n machine = os.uname().machine\n if machine.startswith(\"arm\") or machine.startswith(\"aarch\"):\n system = \"pi\"\n\n return system + \"_\" + platform.architecture()[0]", "def get_name():\n return config.APP_NAME", "def platform(self, return_str=True):\n architecture = self.arch(\"docker\")\n host_platform = self.osversion() + \"/\" + architecture\n if return_str:\n return host_platform.lower()\n return self.parse_platform(host_platform)", "def get_os_name(cls):\n return cls.get_os_type().name", "def application_name(self) -> Optional[str]:\n return pulumi.get(self, \"application_name\")", "def environment_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"environment_name\")", "def arn(self) -> str:\n return pulumi.get(self, \"arn\")", "def arn(self) -> str:\n return pulumi.get(self, \"arn\")", "def arn(self) -> str:\n return pulumi.get(self, \"arn\")", "def arn(self) -> str:\n return pulumi.get(self, \"arn\")", "def arn(self) -> str:\n return pulumi.get(self, \"arn\")", "def get_ami_keyname ( app_name ) :\n return app_name + '.ami'", "def 
OverridePlatformName(name):\n global override_platform_name\n override_platform_name = name", "def getPlatform(self):\n\t\treturn None", "def product_name(self):\n buf = (ctypes.c_char * self.MAX_BUF_SIZE)()\n self._dll.JLINKARM_EMU_GetProductName(buf, self.MAX_BUF_SIZE)\n return ctypes.string_at(buf).decode()", "def brand(self) -> str:\n return self._config_entry.data.get(CONF_BRAND, DEFAULT_BRAND)", "def get_name(self):\n return \"catkin\"", "def arn(self) -> Optional[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> Optional[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> Optional[str]:\n return pulumi.get(self, \"arn\")", "def master_name(self):\n return self._LAUNCHPAD_NAME", "def app_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"app_name\")", "def get_chromeos_platform_name():\r\n try:\r\n platform = cros_config.call_cros_config_get_output('/ name', utils.run)\r\n if platform == '':\r\n platform = get_board()\r\n return platform\r\n except:\r\n logging.info(\"Not found\")\r\n return -1", "def platform_info(self):\n return platform.uname()._asdict()", "def name(self) -> str:\n return self.dev.label", "def environment_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"environment_name\")", "def _get_osname():\n osname = sys.platform.lower()\n if osname == \"linux2\":\n osname = \"linux\"\n return osname", "def getApplicationName(self) -> unicode:\n ...", "def name(self) -> str:\n return self.config_name or self.host_name or self.dev_id or DEVICE_DEFAULT_NAME", "def resource_arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_arn\")", "def get_platform(self):\n return self._platform", "def platform():\n if 'OS' in gyp_defines():\n if 'android' in gyp_defines()['OS']:\n return 'android'\n else:\n return gyp_defines()['OS']\n elif IsWindows():\n return 'win'\n elif IsLinux():\n return 'linux'\n else:\n return 'mac'", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")", "def os(self) -> str:\n os = None\n attributes_tag = self._get_tag(\"parallelcluster:attributes\")\n if attributes_tag:\n # tag is in the form \"{BaseOS}, {Scheduler}, {Version}, {Architecture}\"\n os = attributes_tag.split(\",\")[0].strip()\n return os", "def platform_version(self) -> Optional[str]:\n return pulumi.get(self, \"platform_version\")", "def invoke_arn(self) -> str:\n return pulumi.get(self, \"invoke_arn\")", "def resource_arn(self) -> Optional[str]:\n return pulumi.get(self, \"resource_arn\")", "def application_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_name\")", "def app_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_name\")", "def resource_name(self) -> Optional[str]:\n return pulumi.get(self, \"resource_name\")", "def resource_name(self) -> Optional[str]:\n return pulumi.get(self, \"resource_name\")", "def name(self):\n return get_env_name(self.tool_name,\n self._python,\n self._requirements,\n self._tagged_env_vars)", "def os_label(self):\n\n return self._os_label", "def application(self):\n\n if not self._applicationDef:\n raise NotValidPlatformException(\n 'No application definition is available. 
Are you sure you are running on Platform.sh?'\n )\n return self._applicationDef", "def name(self):\n return self._env_name", "def get_platform():\r\n platforms = [\r\n \"Android\",\r\n \"Linux.RaspberryPi\",\r\n \"Linux\",\r\n \"XBOX\",\r\n \"Windows\",\r\n \"ATV2\",\r\n \"IOS\",\r\n \"OSX\",\r\n \"Darwin\",\r\n ]\r\n\r\n for platform in platforms:\r\n if xbmc.getCondVisibility('System.Platform.%s' % platform):\r\n return platform\r\n return \"Unknown\"", "def get_launch_name():\n\n if product_type == \"RHEL7\":\n launch_name = \"Errata-{0}_{1}_{2}_{3}_{4}_{5}CDN\".format(errata_id, product_type, variant, arch, test_level, cdn)\n \n elif product_type == \"RHEL8\":\n launch_name = \"Errata-{0}_{1}_{2}_{3}_{4}CDN\".format(errata_id, product_type, arch, test_level, cdn)\n\n return launch_name", "def anki(self) -> str:\n\n return self._get_via_app_bundle(path=\"/Applications/Anki.app\")", "def name(self) -> str:\n return self._device.name or self._device.mac", "async def osname(self):\n\n await self.bot.say(box(system(), 'Bash'))", "def get_system_name(self):\n\n\t\treturn self.__system_name", "def name(self):\n return self._config.backend_name", "def management_account_arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"management_account_arn\")", "def get_version_key(platform):\n\tversion_key = ''\n\tif platform.lower() == 'sdk':\n\t\tversion_key = 'sdk_version'\n\telif platform.lower() == 'server':\n\t\tversion_key = 'server_version'\n\telif platform.lower() == 'android' or platform.lower() == 'ios':\n\t\tversion_key = 'app_version'\n\treturn version_key", "def provider_name(self):\n return self.resource_class.name", "def provider_name(self):\n return self.resource_class.name", "def _format_platform(platform, release, architecture=None):\n rep = f\"{_PLATFORMS[platform]} {release}\"\n if architecture is None or architecture == default.architecture:\n return rep\n return f\"{rep} ({architecture})\"", "def get_ami_keypath ( env_type ) :\n return \"/builds/esp/\" + env_type + \"/current/\"", "def component_arn(self) -> Optional[str]:\n return pulumi.get(self, \"component_arn\")", "def component_arn(self) -> Optional[str]:\n return pulumi.get(self, \"component_arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")" ]
[ "0.7836344", "0.6630474", "0.66273826", "0.6609992", "0.6324445", "0.6285871", "0.623594", "0.61513615", "0.60816205", "0.60486674", "0.60041595", "0.598996", "0.59880674", "0.596188", "0.59613025", "0.5961109", "0.5934884", "0.5923574", "0.5923574", "0.59224755", "0.59224755", "0.59121084", "0.58728135", "0.58551097", "0.58390915", "0.58338064", "0.5812323", "0.5772314", "0.5770205", "0.5742552", "0.5742552", "0.5742552", "0.5742552", "0.5742552", "0.57322156", "0.5729232", "0.5720229", "0.57174546", "0.57080966", "0.57061106", "0.5698364", "0.5698364", "0.5698364", "0.5694725", "0.56793827", "0.56661457", "0.566556", "0.5658823", "0.5650267", "0.56356", "0.56234056", "0.5614525", "0.56060565", "0.55959004", "0.5575527", "0.5570208", "0.5570208", "0.5570208", "0.5570208", "0.556691", "0.556073", "0.55485886", "0.554689", "0.5534316", "0.5515695", "0.55130464", "0.55130464", "0.5509648", "0.5508013", "0.5505917", "0.5503292", "0.5502804", "0.54957014", "0.5486349", "0.54807585", "0.5477803", "0.5474466", "0.5474107", "0.547107", "0.54597354", "0.5457258", "0.5457258", "0.5456004", "0.54533577", "0.54491043", "0.54491043", "0.5428213", "0.5428213", "0.5428213", "0.5428213", "0.5428213", "0.5428213", "0.5428213", "0.5428213", "0.5428213", "0.5428213", "0.5428213", "0.5428213", "0.5428213", "0.5428213" ]
0.77357316
1
The name of an Elastic Beanstalk solution stack (platform version) to use with the environment.
def solution_stack_name(self) -> Optional[pulumi.Input[str]]:
    return pulumi.get(self, "solution_stack_name")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solution_stack_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"solution_stack_name\")", "def stackname(self):\n return self.BASE_NAME.format(**self.conf)", "def stack_name(self) -> str:\n return jsii.get(self, \"stackName\")", "def env_name(self):\n return f\"{self.project_name}-{self.stage}\"", "def stack_name(self):\n stack_name = getattr(self, '__stack_name', None)\n if (\n self.args.stack_name and\n not stack_name\n ):\n stack_name = self.args.stack_name\n elif not stack_name:\n stack_name = \"nephoria-stack-\" + str(int(time.time()))\n\n setattr(self, '__stack_name', stack_name)\n return stack_name", "def name(self):\n return self._env_name", "def kernel_name():\n return \"python3\"", "def stack_name(self) -> str:\n return self._values.get(\"stack_name\")", "def name_tag(resource_name):\n return Join(\"\", [Ref('AWS::StackName'), '-', resource_name])", "def name(self):\n return get_env_name(self.tool_name,\n self._python,\n self._requirements,\n self._tagged_env_vars)", "def environment_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"environment_name\")", "def pipeline_stack_name(self) -> str:\n return self._values.get(\"pipeline_stack_name\")", "def get_name():\n return config.APP_NAME", "def environment_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"environment_name\")", "def environment_label(self) -> str:\n return self._environment_label", "def stack_name(self) -> typing.Optional[str]:\n return self._values.get(\"stack_name\")", "def get_egg_name():\n global eggname\n if not eggname:\n version = local('git describe --abbrev=4', capture=True)\n if version:\n version = '%s-%s' % (version, datetime.datetime.today().strftime('%Y%m%d'))\n eggname = APP_NAME + '-%s-py%s.egg' % (version.replace('-', '_'), python_version)\n return eggname", "def stackname(self):\n raise NotImplementedError", "def env_name(pre_chars='(', post_chars=')'):\n env_path = builtins.__xonsh_env__.get('VIRTUAL_ENV', '')\n if len(env_path) == 0 and xp.ON_ANACONDA:\n env_path = builtins.__xonsh_env__.get('CONDA_DEFAULT_ENV', '')\n env_name = os.path.basename(env_path)\n if env_name:\n return pre_chars + env_name + post_chars", "def python_name(self):\n return self.requirement.name", "def docker_image_tag(self, app):\n return f\"briefcase/{app.bundle}.{app.app_name.lower()}:{app.target_vendor}-{app.target_codename}\"", "def get_soc_name():\n return get_soc_spec(\"SOC_VERSION\")", "def _get_deployment_flavor():\n flavor = cfg.CONF.paste_deploy.flavor\n return '' if not flavor else ('-' + flavor)", "def get_launch_name():\n\n if product_type == \"RHEL7\":\n launch_name = \"Errata-{0}_{1}_{2}_{3}_{4}_{5}CDN\".format(errata_id, product_type, variant, arch, test_level, cdn)\n \n elif product_type == \"RHEL8\":\n launch_name = \"Errata-{0}_{1}_{2}_{3}_{4}CDN\".format(errata_id, product_type, arch, test_level, cdn)\n\n return launch_name", "def container_image_name(registry, component_name, version):\n if version is None:\n image = component_name + ':dev'\n else:\n image = '%s/%s:%s' % (registry, component_name, version)\n\n return image", "def stage_name(self) -> str:\n return pulumi.get(self, \"stage_name\")", "def stage_name(self) -> str:\n return pulumi.get(self, \"stage_name\")", "def generate_cluster_stack_name(job):\n return 'cluster-%s----%s' % (job.compute_resource.id, job.id)", "def version_name(self) -> str:\n return pulumi.get(self, \"version_name\")", "def bucket_dual_stack_domain_name(self) -> str:\n return jsii.get(self, 
\"bucketDualStackDomainName\")", "def bucket_dual_stack_domain_name(self) -> str:\n return jsii.get(self, \"bucketDualStackDomainName\")", "def _get_environment():\n namespace = current_app.config.get('POD_NAMESPACE').lower()\n if namespace.endswith('dev'):\n return 'DEV'\n if namespace.endswith('test'):\n return 'TEST'\n if namespace.endswith('tools'):\n return 'SANDBOX'\n return ''", "def bucket_dual_stack_domain_name(self) -> str:\n ...", "def get_stack(stack_name, region, cfn_client=None):\n if not cfn_client:\n cfn_client = boto3.client(\"cloudformation\", region_name=region)\n return cfn_client.describe_stacks(StackName=stack_name).get(\"Stacks\")[0]", "def _branch_name(cls, version: Version) -> str:\n suffix = version.public[len(version.base_version) :]\n components = version.base_version.split(\".\") + [suffix]\n if suffix != \"\" and not (\n suffix.startswith(\"rc\")\n or suffix.startswith(\"a\")\n or suffix.startswith(\"b\")\n or suffix.startswith(\".dev\")\n ):\n raise ValueError(f\"Unparseable pants version number: {version}\")\n return \"{}.{}.x\".format(*components[:2])", "def get_res_name():\n return os.getenv(\"RESOURCES_VERSION\", \"res_0.0\")", "def ecr_image_name(dev_account_id, region, component_name, version):\n return '%s.dkr.ecr.%s.amazonaws.com/%s:%s' % (dev_account_id, region, component_name, 'dev' if version is None else version)", "def get_version_name(self):\n\t\tif self.have_metadata is False:\n\t\t\tself._get_metadata()\n\t\t\tself.have_metadata = True\n\n\t\ttry:\n\t\t\treturn self.keyinfo['context_tags'].attrs['version_name']\n\t\texcept:\n\t\t\treturn None", "def get_raw_server_name():\n from google.appengine.api import app_identity\n return '%s.%s.appspot.com' % (os.environ[\n 'CURRENT_VERSION_ID'].split('.')[0], app_identity.get_application_id())", "def _k8s_service_name(self):\n return \"{}-ssh-service\".format(self.app.name)", "def stage_name(self) -> str:\n return self._stage_name", "def deploy_stack():\n build = \"sam build --use-container --manifest src/images/requirements.txt\"\n local(build)\n\n #package = f\"sam package --template-file template.yaml --output-template-file \\\n # packaged.yaml --s3-bucket {env.bucket_name} --region {env.aws_region}\"\n #local(package)\n\n deploy = f\"sam deploy --stack-name storge-machine-service \\\n --s3-bucket {env.bucket_name}\\\n --parameter-overrides env=dev --capabilities CAPABILITY_IAM CAPABILITY_AUTO_EXPAND --region {env.aws_region}\"\n #deploy = \"sam deploy\"\n local(deploy)", "def get_stack_info():\n\n response = cloudformation.describe_stacks(\n StackName=config.CLOUDFORMATION_STACK_NAME\n )\n return response['Stacks'][0]", "def product(self):\n return self.appName", "def get_distrib_name():\n distrib, version, codename = _get_release_infos()\n \n if distrib.startswith('Red Hat Enterprise Linux'):\n return 'RHEL'\n elif distrib.startswith('CentOS'):\n return 'CentOS'\n else:\n abort(\"OS not supported.\")", "def PlatformName():\n if override_platform_name:\n return override_platform_name\n if IsWindows():\n return 'win32'\n if IsLinux():\n return 'linux'\n if IsMac():\n return 'mac'\n raise NotImplementedError('Unknown platform \"%s\".' % sys.platform)", "def create_r53_name ( base_name, name ) :\n env = get_env_type( base_name )\n if env :\n env = env.lower( )\n if ( env == 'prod' ) :\n return name\n\n return name + '.' 
+ env", "def deployment_group_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"deployment_group_name\")", "def version_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version_name\")", "def describe_stack(cfn, stack_name):\n try:\n stacks = cfn.describe_stacks(StackName=stack_name)[\"Stacks\"]\n return stacks[0]\n except ClientError as e:\n if \"does not exist\" not in e.response[\"Error\"][\"Message\"]:\n raise e\n return None", "def fhir_version_name(fhir_version):\n major_version = int(fhir_version.split('.')[0])\n\n if major_version < 3:\n return 'dstu2'\n elif (major_version >= 3) and (major_version < 4):\n return 'stu3'\n elif (major_version >= 4) and (major_version < 5):\n return 'r4'\n else:\n raise Exception(\n f'Invalid fhir version supplied: {fhir_version}! No name exists '\n 'for the supplied fhir version.'\n )", "def getSlavename():", "def python_branch():\n\n return _sys_version()[2]", "def compliance_pack_name(self) -> str:\n return pulumi.get(self, \"compliance_pack_name\")", "def GetOSName():\n return Config.osName_", "def _get_upgrade_stack():\n from resource_management.libraries.functions.default import default\n direction = default(\"/commandParams/upgrade_direction\", None)\n stack_name = default(\"/hostLevelParams/stack_name\", None)\n stack_version = default(\"/commandParams/version\", None)\n\n if direction and stack_name and stack_version:\n return (stack_name, stack_version)\n\n return None", "def stage_name(self) -> str:\n return self._values.get(\"stage_name\")", "def get_package_name(self):\n return self.name + '-' + self.version", "def name(self):\n return _version._NAME # pylint: disable=protected-access", "def name(self):\r\n return self.setuptools_requirement.project_name", "def storage_appliance_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"storage_appliance_name\")", "def get_env_name(self):\n if self.options.environment:\n return self.options.environment\n elif os.environ.get(\"JUJU_ENV\"):\n return os.environ['JUJU_ENV']\n\n env_ptr = os.path.join(self.juju_home, \"current-environment\")\n if os.path.exists(env_ptr):\n with open(env_ptr) as fh:\n return fh.read().strip()\n\n with open(self.get_env_conf()) as fh:\n conf = yaml.safe_load(fh.read())\n if not 'default' in conf:\n raise ConfigError(\"No Environment specified\")\n return conf['default']", "def deployment_environment(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"deployment_environment\")", "def name(self) -> str:\n return self.dev.label", "def get_app_hostname():\n if not is_running_on_app_engine() or is_running_on_localhost():\n return None\n\n version = modules.get_current_version_name()\n app_id = app_identity.get_application_id()\n\n suffix = 'appspot.com'\n\n if ':' in app_id:\n tokens = app_id.split(':')\n api_name = tokens[1]\n if tokens[0] == 'google.com':\n suffix = 'googleplex.com'\n else:\n api_name = app_id\n\n # Check if this is the default version\n default_version = modules.get_default_version()\n if version == default_version:\n return '{0}.{1}'.format(app_id, suffix)\n else:\n return '{0}-dot-{1}.{2}'.format(version, api_name, suffix)", "def brand(self):\n return \"Nest Labs\"", "def bucket_dual_stack_domain_name(self) -> typing.Optional[str]:\n return self._values.get('bucket_dual_stack_domain_name')", "def bundle_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bundle_name\")", "def bundle_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, 
\"bundle_name\")", "def fullname(self):\n return \"{project}/{version}\".format(\n project=self.project.name, version=self.name\n )", "def get_image_name():\n try:\n return os.environ['AIRFLOW_IMAGE']\n except KeyError:\n raise Exception(\"Please provide docker image name to pytest using environment variable AIRFLOW_IMAGE\")", "def _app(self) -> str:\n return self.charm.app.name", "def get_product_name(self):\n sushy_system = self._get_sushy_system()\n return sushy_system.model", "def get_stack_domain_name(self, stack_name):\n cf_stack = stack(self.session)\n resources = cf_stack.get_stack_resources(stack_name, 'AWS::ApiGateway::DomainName')\n if not resources:\n return False\n return resources[0]", "def name(cls):\n\n system = platform.system()\n\n # Apply system map\n if system in NAME_MAP:\n system = NAME_MAP[system]\n\n return system", "def get_env_type ( base_name ) :\n return base_name.split( '-', 1 )[ 0 ]", "def getSiteName():\n return os.environ['SITENAME']", "def bundle_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"bundle_name\")", "def name(self):\n return self._config.backend_name", "def app_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"app_name\")", "def package_name(self) -> str:\n return pulumi.get(self, \"package_name\")", "def production_settings_name():\n if hasattr(SettingsType, 'AWS'):\n # Hawthorn and Ironwood\n return getattr(SettingsType, 'AWS')\n else:\n # Juniper and beyond.\n return getattr(SettingsType, 'PRODUCTION')", "def display_name(self) -> str:\n if self.is_verified:\n return f\"Verified Package {self.csharp_version}\"\n elif self.is_main:\n return \"main (unstable)\"\n else:\n return self.release_tag.replace(\"_\", \" \").title()", "def tracing_name(name: Optional[str] = None) -> str:\n if name is None:\n name = settings.SERVICE_NAME\n return f\"{name}.{settings.ENVIRONMENT.lower()}\"", "def app_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_name\")", "def git_service_name(self):\n return self._git_service_name", "def get_target_group_name(self, short_name):\n app_env = self.get_current_env()\n full_name = self.get_target_group_fully_qualified_name(short_name)\n namespace = self.config['namespace']\n\n if len(full_name) <= 32:\n return full_name\n elif len(namespace) + 10 <= 32:\n env_target_hash = hashlib.md5((short_name + app_env).encode()).hexdigest()[:9]\n return '{}-{}'.format(namespace, env_target_hash)\n else:\n return hashlib.md5(full_name.encode()).hexdigest()", "def get_product_name(self):\n system = self._get_host_details()\n return system['Model']", "def name(self):\n return self.application_tree['name']", "def get_name():\n return __name__", "def get_vm_image_name(self):\n return self.virtual_environment[self.T_I][self.T_I_N] if self.is_vm_image() else None", "def version():\n click.echo(u'shellfoundry version ' + pkg_resources.get_distribution(u'shellfoundry').version)", "def platform_config_filename(region, account_prefix, prod):\n return 'infra/platform-config/%s/%s/%s.json' % (\n account_prefix, \"prod\" if prod else \"dev\", region\n )", "def platform():\n return \"micaz\"", "def get_package_name(self):\n return self.name + '-' + self.version + '-' + self.release", "def container_name(self):\n pass", "def _get_engine_name(self):", "def docker_image_name(self):\n raise NotImplementedError", "def get_version_tag(self, version: str) -> str:\n return version", "def get_name(self):\n return \"%s.%s.%s.%s\" % (\n self.__module__, self.__class__.__name__, 
self.calc_meta(), self._get_os_name())" ]
[ "0.6850209", "0.6835407", "0.6518939", "0.65049684", "0.64019746", "0.6248095", "0.6167335", "0.615564", "0.6142327", "0.6139697", "0.6102911", "0.6087402", "0.60867625", "0.60640186", "0.6010799", "0.59820807", "0.592273", "0.58995575", "0.5882993", "0.58522046", "0.58299625", "0.5815258", "0.5814011", "0.5812235", "0.57710993", "0.5765567", "0.5765567", "0.5765216", "0.57643604", "0.5695329", "0.5695329", "0.56839085", "0.5674591", "0.5674161", "0.56532377", "0.56490034", "0.56309694", "0.56271917", "0.561327", "0.56101394", "0.5589469", "0.558715", "0.55527383", "0.55517864", "0.55415076", "0.55346364", "0.551927", "0.55187416", "0.55087084", "0.5502728", "0.5495725", "0.5495008", "0.5480565", "0.5475035", "0.547384", "0.5472076", "0.5469998", "0.54641587", "0.5462898", "0.54583347", "0.54409873", "0.5440924", "0.5430292", "0.53991085", "0.5392494", "0.53762156", "0.5352159", "0.5349355", "0.5349355", "0.53422755", "0.5342205", "0.534167", "0.53308344", "0.5320908", "0.53182954", "0.5317607", "0.5313681", "0.53108335", "0.52945507", "0.5281143", "0.5280017", "0.52708626", "0.5262653", "0.5260385", "0.5253272", "0.52522504", "0.52352047", "0.52324015", "0.5219345", "0.5214645", "0.5214562", "0.52133626", "0.52128005", "0.52115226", "0.52078074", "0.5203065", "0.52013797", "0.5200858", "0.5196014", "0.5191939" ]
0.68761265
0
Specifies the tags applied to resources in the environment.
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentTagArgs']]]]:
    return pulumi.get(self, "tags")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tags(self, tags):\n self._tags = tags", "def tags(self, tags):\n self._tags = tags", "def tags(self, tags):\n self._tags = tags", "def tags(self, tags):\n\n self._tags = tags", "def tags(self, tags):\n\n self._tags = tags", "def tags(self, tags):\n\n self._tags = tags", "def tags(self, tags):\n\n self._tags = tags", "def tags(self, tags):\n\n self._tags = tags", "def tags(self, tags):\n\n self._tags = tags", "def tags(self, tags):\n\n self._tags = tags", "def tags(self, tags):\n\n self._tags = tags", "def tags(self, tags):\n\n self._tags = tags", "def tags(self, tags):\n\n self._tags = tags", "def add_tags_to_resource(ResourceId=None, Tags=None):\n pass", "def tags(self, tags: List[Tag]):\n\n self._tags = tags", "def tags(self) -> Tags:\n return Tags(**dict(self.context.tags, **self.args.tags))", "def tags(self):\r\n return resources.Tags(self)", "def list_tags_for_resource(Resource=None):\n pass", "def describe_tags(resourceArns=None):\n pass", "def tag_resource(resourceArn=None, tags=None):\n pass", "def set_tags(self, tags, filename):\n return self.set_tags_batch(tags, [filename])", "def set_tags(self, tags):\n self.tags = []\n for tag in [t.strip() for t in tags.split(', ')]:\n self.tags.append(Tag(title=tag))", "def add_tags(ResourceArn=None, Tags=None):\n pass", "def set_tags(self, tags):\n self._tag.clear()\n\n for tag in tags:\n if tag not in self._tag:\n self._tag.append(tag)\n\n return self", "def tag_resource(ResourceArn=None, Tags=None):\n pass", "def tag_resource(ResourceArn=None, Tags=None):\n pass", "def tag_resource(ResourceArn=None, Tags=None):\n pass", "def tags():", "def set_tags(self, tags):\r\n current_tags = set(self.tag_names())\r\n updated_tags = set(tags)\r\n removed_tags = current_tags.difference(updated_tags)\r\n new_tags = updated_tags.difference(current_tags)\r\n \r\n for tag in new_tags:\r\n self.add_tag(tag)\r\n \r\n for tag in removed_tags:\r\n self.remove_tag(tag)", "def create_tags(self, resource_name: str, **kwargs) -> dict:\n init_tags = self._global_tags\n init_tags.update(kwargs)\n tags = {}\n for key, value in init_tags.items():\n if key == \"business_unit\":\n self._check_business_unit(value, self._allowed_business_units)\n if key in [\"is_production\"]:\n raise KeyError(f\"{key} is not an allowed argument\")\n tags[key.replace(\"_\", \"-\").lower()] = value\n tags[\"is-production\"] = tags[\"environment-name\"] in [\"alpha\", \"prod\"]\n tags[\"Name\"] = resource_name\n return tags", "def set_tags(self, session, *tags):\n if not tags:\n return list()\n\n result = self._tag(session.put, tags=list(tags), session=session)\n return result['tags']", "def setTags(self,newtags):\n\t\tself.tags = newtags;", "def add_tags():\n\n tags = shallow_copy(e['ResourceProperties'].get('Tags', []))\n tags += [\n {'Key': 'cloudformation:' + 'logical-id', 'Value': e['LogicalResourceId']},\n {'Key': 'cloudformation:' + 'stack-id', 'Value': e['StackId']},\n {'Key': 'cloudformation:' + 'stack-name', 'Value': e['StackId'].split('/')[1]},\n {'Key': 'cloudformation:' + 'properties', 'Value': hash_func(e['ResourceProperties'])}\n ]\n\n acm.add_tags_to_certificate(**{'CertificateArn': e['PhysicalResourceId'], 'Tags': tags})", "def defined_tags(self, defined_tags):\n self._defined_tags = defined_tags", "def tag_resources(\n self,\n request: dds_20151201_models.TagResourcesRequest,\n ) -> dds_20151201_models.TagResourcesResponse:\n runtime = util_models.RuntimeOptions()\n return self.tag_resources_with_options(request, runtime)", "def tags(self) -> 
pulumi.Output[Optional[Sequence['outputs.EnvironmentTag']]]:\n return pulumi.get(self, \"tags\")", "def set_tags(self, tags):\n for task in self._tasks:\n task.set_tags(tags)\n\n return self", "def AddTags(resource_id, region, **kwargs):\n if not kwargs:\n return\n\n describe_cmd = SoftLayer_PREFIX + [\n '--format',\n 'json',\n 'vs',\n 'detail',\n '%s' % resource_id]\n\n stdout, _ = IssueRetryableCommand(describe_cmd)\n response = json.loads(stdout)\n tags = response['tags']\n\n tag_cmd = SoftLayer_PREFIX + [\n 'vs',\n 'edit']\n\n if tags is not None:\n for tag in tags:\n tag_cmd = tag_cmd + ['--tag', '{0}'.format(tag)]\n\n for key, value in kwargs.items():\n tag_cmd = tag_cmd + ['--tag', '{0}:{1}'.format(key, value)]\n\n tag_cmd = tag_cmd + ['{0}'.format(resource_id)]\n IssueRetryableCommand(tag_cmd)", "def setAddTags(self,value):\n self.PDFreactorConfiguration.in1[\"addTags\"] = value", "def tags(self) -> Mapping[str, str]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Mapping[str, str]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Mapping[str, str]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Mapping[str, str]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Sequence[str]:\n return pulumi.get(self, \"tags\")", "def add_tags(event):\n\n add_tags_from_presets()", "def create_tags(ResourceArn=None, Tags=None):\n pass", "def tags(self) -> Mapping[str, Any]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Mapping[str, Any]:\n return pulumi.get(self, \"tags\")", "def set_tags(self, tags):\n uniques = set()\n distinct = []\n for tag in tags:\n if tag not in uniques:\n distinct.append(tag)\n uniques.add(tag)\n self.__post_changes(distinct)", "def setTag(self, tag):\n\t\tself.config.TAG = tag", "def tags(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'tags')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def save_tags(context):\n items = context.response.json()['items']\n tags = set()\n for item in items:\n for tag in item['tags']:\n tags.add(tag)\n context.tags = list(tags)\n logging.debug('Saved all tags in context.tags:\\n%s', pformat(sorted(context.tags)))", "def tag_instance(self, tags):\n self._request({\"instance-tags\": dict(tags)})", "def allocation_resource_tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"allocation_resource_tags\")", "def allocation_resource_tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"allocation_resource_tags\")", "def tags(self):\n raise BookInfoNotImplementedError('tags', self.__class__.__name__)", "def allocation_resource_tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:\n return pulumi.get(self, \"allocation_resource_tags\")", "def resource_name() -> str:\n return \"contactTags\"", "def __init__(self, tags=''):\n self.tags = tags", "def create_tags(configurationIds=None, tags=None):\n pass", "def update_tags(self, tags, **kwargs):\n request = RequestMiddleware.get_request()\n is_admin = request.user and request.user.is_admin\n # Keep all tags that start with pf: because they are reserved.\n preserved = [tag for tag in self.tags if tag.startswith('pf:')]\n if is_admin:\n remove = [tag[1:] for tag in tags if tag.startswith('-pf:')]\n preserved = [tag for tag in preserved if tag not in remove]\n\n # Filter out new tags that are invalid or reserved.\n accepted = [tag for tag in tags\n if TAG_REGEX_COMPILED.match(tag)\n and (is_admin or not tag.startswith('pf:'))]\n # Limit the number 
of tags per entity.\n if len(accepted + preserved) > settings.MAX_TAGS_PER_ENTITY:\n accepted = accepted[:settings.MAX_TAGS_PER_ENTITY - len(preserved)]\n self.tags = list(set(accepted + preserved))", "def __init__(__self__, *,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)", "def tags(self, request, tag_list, group):\n return tag_list", "def append_tags(self, tags):\n\n tags = H.to_list(tags)\n # self._tags.update(tags)\n self.tags.update(tags)", "def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PackagingConfigurationTagArgs']]]]:\n return pulumi.get(self, \"tags\")", "def tag_resources_with_options(\n self,\n request: dds_20151201_models.TagResourcesRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.TagResourcesResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.region_id):\n query['RegionId'] = request.region_id\n if not UtilClient.is_unset(request.resource_group_id):\n query['ResourceGroupId'] = request.resource_group_id\n if not UtilClient.is_unset(request.resource_id):\n query['ResourceId'] = request.resource_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.resource_type):\n query['ResourceType'] = request.resource_type\n if not UtilClient.is_unset(request.tag):\n query['Tag'] = request.tag\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='TagResources',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.TagResourcesResponse(),\n self.call_api(params, req, runtime)\n )", "def __get_tags(self, name):\n return Tags(\n Environment=\"ApiDev\",\n Name=\"ApiDev-Dev-\"+name,\n Owner=\"Foo industries\",\n Service=\"ServiceVPC\",\n VPC=\"Dev\",\n )", "def tags(self):\n return ['HostRoles/component_name', \\\n 'HostRoles/host_name', \\\n 'HostRoles/cluster_name']", "def tag_names(self, tag_names):\n\n self._tag_names = tag_names", "def custom_tags(self, custom_tags):\n\n self._custom_tags = custom_tags", "def initialize_tags(self):\n\t\tfor tag_enum in Tags:\n\t\t\ttag = Tag(id=tag_enum.value, description=tag_enum.name)\n\t\t\tself.session.add(tag)\n\t\t\tself.session.commit()", "async def tag_resources_async(\n self,\n request: dds_20151201_models.TagResourcesRequest,\n ) -> dds_20151201_models.TagResourcesResponse:\n runtime = util_models.RuntimeOptions()\n return await self.tag_resources_with_options_async(request, runtime)", "def add_tags(self, tags):\n\n if isinstance(tags, string_types):\n message = \"tags should be a list or None, got tags={}\".format(tags)\n raise TypeError(message)\n\n self.tags = self.tags.union(tags)", "def add_tags(self, tags):\n cp = self.copy()\n cp.tags = cp.tags.union(set(tags))\n return cp", "def list_tags_for_resource(ResourceId=None, NextToken=None, Limit=None):\n pass", "def remove_tags_from_resource(ResourceId=None, TagKeys=None):\n pass", "def 
resources(self, value):\n self._resource_objects = value", "def __init__(self, tags):\n self.tags = tags", "def tags(self):\r\n return Tags(self)", "def tags(self):\r\n return Tags(self)", "def get_resources(self):\n client = self.client\n if self.resources:\n return self.resources\n\n response = client.list_buckets()\n for resource in response['Buckets']:\n resource_name = resource['Name']\n tags = client.get_bucket_tagging(\n Bucket=resource_name\n )\n self.resources.append({\n \"Name\": resource_name,\n \"Tags\": tags.get(\"TagSet\")\n })\n\n return self.resources", "def resources(self, resources):\n self._resources = resources", "def list_tags(ResourceArn=None):\n pass", "def hook_tags_for_projects(task):\n if task['project'] in TAGS_FOR_PROJECTS.keys():\n for tag in TAGS_FOR_PROJECTS[task['project']]:\n task['tags'].add(tag)", "def handle_tags(self, request):\n \"\"\"\n @api {get} /tags List tags\n @apiName GetTags\n @apiGroup Misc\n @apiVersion 1.0.0\n\n @apiDescription List currenty used tags\n\n @apiSuccessExample {json} Example response:\n [\n \"tag1\",\n \"tag2\"\n ]\n \"\"\"\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n\n tags = []\n\n for task in self.cluster.config.get('tasks').values():\n if 'tags' in task:\n tags += task['tags']\n\n tags = list(set(tags))\n\n return HTTPReply(code = 200, body = json.dumps(tags), headers = headers)", "def set_tag(self, scope, key, value):\r\n self._tags[scope][key] = value\r\n print 'SET', scope, key, value, self._tags", "def tags_dict(self):\n return ({'name': 'tag', 'attrs': {'k': k, 'v': v}} for k, v in self.tags.items())", "def tags(self, val: list):\n self._tags = []\n if val is not None:\n for item in val:\n self._tags.append(item)", "def ex_describe_tags(self, node):\n params = { 'Action': 'DescribeTags',\n 'Filter.0.Name': 'resource-id',\n 'Filter.0.Value.0': node.id,\n 'Filter.1.Name': 'resource-type',\n 'Filter.1.Value.0': 'instance',\n }\n\n result = self.connection.request(self.path,\n params=params.copy()).object\n\n tags = {}\n for element in self._findall(result, 'tagSet/item'):\n key = self._findtext(element, 'key')\n value = self._findtext(element, 'value')\n\n tags[key] = value\n return tags", "def tags(self):\n return self.get(\"tags\")", "def tag_ids(self, tag_ids):\n\n self._tag_ids = tag_ids", "def list_tags_for_resource(ResourceArn=None):\n pass", "def tags(self) -> dict:\n return self._tags", "def __init__(__self__, *,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)", "def tags(self) -> Optional[Any]:\n return pulumi.get(self, \"tags\")", "def tag(self, tag):\n self.tag = tag", "def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ContainerTagArgs']]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[Sequence['outputs.ApplicationTag']]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[Sequence['outputs.ApplicationTag']]:\n return pulumi.get(self, \"tags\")", "def aws_tags(self, values):\n if not getattr(self, \"tags\", None):\n self.tags = {}\n\n tags = defaultdict(list)\n\n for tag in values:\n tags[tag[\"Key\"]].append(tag[\"Value\"])\n\n self.tags.update(tags)\n self._transform_known_tags()" ]
[ "0.7253586", "0.7253586", "0.7253586", "0.71411216", "0.71411216", "0.71411216", "0.71411216", "0.71411216", "0.71411216", "0.71411216", "0.71411216", "0.71411216", "0.71411216", "0.70391154", "0.6818384", "0.6521481", "0.65186703", "0.6484757", "0.6386305", "0.63736135", "0.63720703", "0.6348573", "0.6315282", "0.63069445", "0.62737304", "0.62737304", "0.62737304", "0.625226", "0.6237588", "0.619663", "0.61576015", "0.61543006", "0.61361843", "0.61112654", "0.60376364", "0.6031545", "0.6012519", "0.60074174", "0.59733707", "0.5947739", "0.5947739", "0.5947739", "0.5947739", "0.59294194", "0.5926727", "0.5921485", "0.5917415", "0.5917415", "0.59138846", "0.5877511", "0.58596504", "0.58427536", "0.58391887", "0.5790259", "0.5790259", "0.57797784", "0.5770259", "0.5751251", "0.57472295", "0.5715266", "0.57014805", "0.5699238", "0.5698222", "0.56843764", "0.5672603", "0.56649965", "0.56645495", "0.56640166", "0.56574625", "0.5646573", "0.5638195", "0.56355274", "0.5625256", "0.56144977", "0.56100386", "0.5609386", "0.5607538", "0.5598015", "0.5596249", "0.5596249", "0.5594657", "0.55942047", "0.55781096", "0.55616134", "0.55575573", "0.55574065", "0.555644", "0.55446976", "0.5541648", "0.5540368", "0.55400044", "0.5537826", "0.5531521", "0.5528734", "0.55215365", "0.5520191", "0.55132055", "0.54977083", "0.54977083", "0.54968196" ]
0.63703835
21
The name of the Elastic Beanstalk configuration template to use with the environment.
def template_name(self) -> Optional[pulumi.Input[str]]:
    return pulumi.get(self, "template_name")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_configuration_template(self):\n return CONFIG_TEMPLATE", "def template_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"template_name\")", "def inspect_template_name(self) -> str:\n return pulumi.get(self, \"inspect_template_name\")", "def template(self):\n return self.conf.get(\"template\", None)", "def stackname(self):\n return self.BASE_NAME.format(**self.conf)", "def name(self):\n return self._config_name", "def health_check_template_name(self) -> str:\n return pulumi.get(self, \"health_check_template_name\")", "def name(self):\n return self._config.get(CONF_NAME)", "def template_name(self, template_type: Union[TemplateType, str]) -> str:\n return self.options.get(\"templates\", {}).get(template_type, template_type)", "def launch_template_name(self) -> Optional[str]:\n return pulumi.get(self, \"launch_template_name\")", "def getGenericConfigFileName(self):\n executePkgDir = lsst.utils.getPackageDir('ctrl_execute')\n\n name = \"config_with_%s.py.template\" % self.setup_using\n genericConfigName = os.path.join(executePkgDir,\n \"etc\", \"templates\", self.manager, name)\n if os.path.exists(genericConfigName):\n return genericConfigName\n raise RuntimeError(\"File %s not found; check etc/templates.\" %\n genericConfigName)", "def template_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"template_name\")", "def get_config_template(self) -> cconfig.Config:", "def get_config_name(self): # pragma: no cover\n pass", "def template_name(self):\n\t\traise NotImplementedError('template_name must be defined')", "def platform_config_filename(region, account_prefix, prod):\n return 'infra/platform-config/%s/%s/%s.json' % (\n account_prefix, \"prod\" if prod else \"dev\", region\n )", "def launch_template_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"launch_template_name\")", "def launch_template_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"launch_template_name\")", "def name(self) -> str:\n name = self._config[\"name\"]\n assert isinstance(name, str) # noqa: S101\n return name", "def name(self):\n return self.config[\"name\"]", "def template(self) -> str:\n manifest = self._get_manifest()\n\n return manifest[\"template\"]", "def template_path(self):\n return self.get_config(\"templates\")", "def _getConfigName(self):\n pass", "def name(self):\n return self._env_name", "def env_name(self):\n return f\"{self.project_name}-{self.stage}\"", "def name(self):\n return f'{self._config[CONF_NAME]} {self._typeconf[\"name\"]}'", "def environment_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"environment_name\")", "def get_name():\n return config.APP_NAME", "def _get_config_template(self, key):\n tmp_path = self._get_config_value('templates', 'path') + key\n return tmp_path", "def environment_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"environment_name\")", "def config_file_name(self):\n return self._config_file_name", "def environment_label(self) -> str:\n return self._environment_label", "def get_context_template_name(self):\n return getattr(self, 'context_template_name', None)", "def template(self) -> Optional[pulumi.Input['InstanceTemplateSpecArgs']]:\n return pulumi.get(self, \"template\")", "def get_config_file_name(self):\n argv = sys.argv\n config_type = \"dev\" # default configuration type\n if None != argv and len(argv) > 1 :\n config_type = argv[1]\n config_file = config_type + \".cfg\"\n logger.info(\"get_config_file_name() return : \" + config_file)\n 
return config_file", "def production_settings_name():\n if hasattr(SettingsType, 'AWS'):\n # Hawthorn and Ironwood\n return getattr(SettingsType, 'AWS')\n else:\n # Juniper and beyond.\n return getattr(SettingsType, 'PRODUCTION')", "def get_configuration_template(self):\n return {'EXAMPLE_KEY_1': \"Example value\",\n 'EXAMPLE_KEY_2': [\"Example\", \"Value\"]\n }", "def access_configuration_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"access_configuration_name\")", "def get_configuration_template(self):\n return {'EXAMPLE_KEY_1': \"Example value\",\n 'EXAMPLE_KEY_2': [\"Example\", \"Value\"]\n }", "def get_env_name(self):\n if self.options.environment:\n return self.options.environment\n elif os.environ.get(\"JUJU_ENV\"):\n return os.environ['JUJU_ENV']\n\n env_ptr = os.path.join(self.juju_home, \"current-environment\")\n if os.path.exists(env_ptr):\n with open(env_ptr) as fh:\n return fh.read().strip()\n\n with open(self.get_env_conf()) as fh:\n conf = yaml.safe_load(fh.read())\n if not 'default' in conf:\n raise ConfigError(\"No Environment specified\")\n return conf['default']", "def sirsam_bs_conf(sirsam_bootstrap):\n return os.path.join(sirsam_bootstrap, 'bootstrapping.yaml')", "def _get_container_name(self) -> str:\n dirname = os.path.basename(os.getcwd())\n default_container_name = f\"{dirname}_{self.config_name}\"\n container_name = self.config_options.get(\"container_name\", default_container_name)\n return container_name", "def get_config_name():\n name = CONFIG_FILE_NAME\n for i, arg in enumerate(sys.argv):\n if arg.startswith('--config'):\n if arg == '--config':\n return sys.argv[i + 1]\n else:\n name = sys.argv[i].split('--config')[1]\n if name[0] == '=':\n name = name[1:]\n return name\n\n return name", "def configFilename(self):\n return self.name()+'.py'", "def template_dir(self):\n return self.cm.get(YAML_CONFIG_TEMPLATE_DIR)", "def name(self):\n return get_env_name(self.tool_name,\n self._python,\n self._requirements,\n self._tagged_env_vars)", "def configuration_set_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"configuration_set_name\")", "def render_template(self):\n apps = [{\n 'name': container.name,\n 'image': container.image,\n 'environment': container.environment,\n 'memory': container.memory,\n 'portMappings': container.portmappings\n } for container in self.containers]\n\n t = self.templates.get_template('aws/containers.template')\n return t.render(apps=apps, family=self.family)", "def _make_config_file_name(environment, out=False):\n return os.path.join(PH_HOME_DIR, \"etc/config\", \"%s.conf\" % environment) if out else \\\n os.path.join(PH_HOME_DIR, \"config\", \"%s.conf.in\" % environment)", "def getNameTemplate(self):\n\n return self.nameTemplate", "def get_name(self):\n if 'label' in self.configs_:\n return self.configs_['label']\n return self.configs_['id']", "def config(self):\n state_file_id = \"{env}-{component}\".format(env=self.environment, component=self.component)\n\n grunt_config_template = \"\"\"lock = {{\nbackend = \"dynamodb\"\nconfig {{\nstate_file_id = \"{state_file_id}\"\naws_region = \"{region}\"\ntable_name = \"terragrunt_locks\"\nmax_lock_retries = 360\n}}\n}}\nremote_state = {{\nbackend = \"s3\"\nconfig {{\nencrypt = \"true\"\nbucket = \"{s3_bucket}\"\nkey = \"{env}/{component}/terraform.tfstate\"\nregion = \"{region}\"\n}}\n}}\"\"\"\n\n with open('.terragrunt', 'w') as f:\n f.write(grunt_config_template.format(\n state_file_id=state_file_id,\n region=self.metadata['REGION'],\n s3_bucket=self.s3_bucket,\n 
env=self.environment,\n component=self.component\n ))", "def template(c, release=\"url-shortener\"):\n c.run(f\"helm template {release} {HELM_CHART_DIR} > ./generated-deployment.yml\")", "def _create_config(env_path):\n s2e_yaml = 's2e.yaml'\n version_path = os.path.join(os.path.dirname(__file__), '..', 'dat', 'VERSION')\n\n with open(version_path, 'r', encoding='utf-8') as fp:\n context = {\n 'creation_time': str(datetime.datetime.now()),\n 'version': fp.read().strip(),\n }\n\n render_template(context, s2e_yaml, os.path.join(env_path, s2e_yaml))", "def get_template_filename(template):\n config = read_config(SETTINGS_PATH)\n #String templates\n if (template in STRING_TEMPLATES):\n options = config.options(STRING_TEMPLATES_SECTION) \n for option in options:\n if (option==template):\n #Get root path for the templates\n root_path = config.get(TEMPLATES_SECTION,TEMPLATES_ROOT_PATH)\n #Get the strings path templates\n strings_path = config.get(STRING_TEMPLATES_SECTION,STRING_TEMPLATES_PATH)\n return join(root_path,strings_path),config.get(STRING_TEMPLATES_SECTION,option)", "def get_jinja_filename_environment(templates) -> jinja2.Environment:\n loader = jinja2.DictLoader(\n {template.name: template.name for template in templates}\n )\n return jinja2.Environment(\n loader=loader, trim_blocks=True, lstrip_blocks=True\n )", "def setup_config():\n if CONFIG.get(\"environment\", \"server\") == 'production':\n return 'config.ProductionConfig'\n else:\n return 'config.TestingConfig'", "def deploy_cfg():\n return '{buildout}.cfg'.format(buildout=env.host.split('.')[0])", "def current_config():\n if os.environ[\"ENVIRONMENT\"] == \"production\":\n return Production()\n elif os.environ[\"ENVIRONMENT\"] == \"staging\":\n return Staging()\n elif os.environ[\"ENVIRONMENT\"] == \"testing\":\n return Testing()\n elif os.environ[\"ENVIRONMENT\"] == \"development\":\n return Development()\n else:\n raise KeyError(f\"Unknown environment '{os.environ['ENVIRONMENT']}'\")", "def get_template_name(self):\n template = None\n if self.template:\n template = self.template\n if not template:\n for p in self.get_ancestors(ascending=True):\n if p.template:\n template = p.template\n break\n if not template:\n template = settings.CMS_TEMPLATES[0][0]\n for t in settings.CMS_TEMPLATES:\n if t[0] == template:\n return t[1] \n return _(\"default\")", "def get_template(self, template):\n\n template_path = aj.config.data['email']['templates'].get(template, 'default')\n\n if template_path == 'default' or not os.path.isfile(template_path):\n template_path = DEFAULT_TEMPLATES[template]\n\n return template_path", "def get_launch_template(lt_name):\n logger.info(f'Describing launch template for {lt_name}...')\n response = ec2_client.describe_launch_templates(LaunchTemplateNames=[lt_name])\n return response['LaunchTemplates'][0]", "def configuration_configmap_name(self) -> Optional[str]:\n return pulumi.get(self, \"configuration_configmap_name\")", "def access_configuration_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_configuration_name\")", "def container_name(self):\n pass", "def module_name(self):\n return self.config_section", "def _get_environment():\n namespace = current_app.config.get('POD_NAMESPACE').lower()\n if namespace.endswith('dev'):\n return 'DEV'\n if namespace.endswith('test'):\n return 'TEST'\n if namespace.endswith('tools'):\n return 'SANDBOX'\n return ''", "def name_tag(resource_name):\n return Join(\"\", [Ref('AWS::StackName'), '-', resource_name])", "def 
access_configuration_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"access_configuration_name\")", "def name(self):\n return self._config.backend_name", "def get_instance_template(self, name):\n return self.call_api('/global/instanceTemplates/%s' % name)", "def tracing_name(name: Optional[str] = None) -> str:\n if name is None:\n name = settings.SERVICE_NAME\n return f\"{name}.{settings.ENVIRONMENT.lower()}\"", "def secrets_bucket_name(self):\n return self.config.secrets_bucket", "def configuration_set_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"configuration_set_name\")", "def get_sls_config_file(path, stage, region):\n for name in gen_sls_config_files(stage, region):\n if os.path.isfile(os.path.join(path, name)):\n return name\n return \"config-%s.json\" % stage # fallback to generic json name", "def _get_template():\n r = get('http://metadata.google.internal/'\n 'computeMetadata/v1/instance/attributes/instance-template',\n headers={'Metadata-Flavor': 'Google'})\n if r.status_code == 200:\n return sub(r'.+instanceTemplates/(.+)', r'\\1', r.text)\n else:\n return ''", "def config_bundle(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"config_bundle\")", "def env_name(pre_chars='(', post_chars=')'):\n env_path = builtins.__xonsh_env__.get('VIRTUAL_ENV', '')\n if len(env_path) == 0 and xp.ON_ANACONDA:\n env_path = builtins.__xonsh_env__.get('CONDA_DEFAULT_ENV', '')\n env_name = os.path.basename(env_path)\n if env_name:\n return pre_chars + env_name + post_chars", "def _get_template_fname(self):\n template_fname = self._context.get('template_fname', False)\n return template_fname", "def get_template_name(self):\n if self.template_name is not None:\n return self.template_name\n model_opts = self.queryset.model._meta\n return f\"{model_opts.app_label}/{model_opts.model_name}.html\"", "def get_template_name(self):\n if self.template_name is not None:\n return self.template_name\n model_opts = self.queryset.model._meta\n return f\"{model_opts.app_label}/{model_opts.model_name}.html\"", "def generate_haproxy_config(template=None, instances=None):\n\n return Template(filename=template).render(instances=instances)", "def GenerateConfig(context):\n\n resources = [{\n 'name': context.env['name'],\n 'type': 'compute.v1.instance',\n 'properties': {\n 'zone': context.properties['zone'],\n 'machineType': ''.join([COMPUTE_URL_BASE, 'projects/',\n context.env['project'], '/zones/',\n context.properties['zone'], '/machineTypes/',\n context.properties['machineType']]),\n 'disks': [{\n 'deviceName': 'boot',\n 'type': 'PERSISTENT',\n 'boot': True,\n 'autoDelete': True,\n 'initializeParams': {\n 'sourceImage': ''.join([COMPUTE_URL_BASE, 'projects/',\n 'ubuntu-os-cloud/global/',\n 'images/family/ubuntu-1604-lts'])\n }\n }],\n 'networkInterfaces': [{\n 'network': '$(ref.' 
+ context.properties['network']\n + '.selfLink)',\n 'accessConfigs': [{\n 'name': 'External NAT',\n 'type': 'ONE_TO_ONE_NAT'\n }]\n }],\n 'metadata': {\n 'items': [{\n 'key': 'startup-script',\n 'value': ''.join(['#!/bin/bash\\n',\n 'sudo apt-get install openjdk-9-jre-headless -y\\n',\n 'sudo python -m SimpleHTTPServer 80'])\n }]\n }\n }\n }]\n return {'resources': resources}", "def _get_template_filename(self):\n _format = self.cfg.get('mutations', 'format')\n if _format == 'pdf':\n tf = 'PDFTemplate.bt'\n elif _format == 'png':\n tf = 'PNG12Template.bt'\n\n module_dir = os.path.dirname(os.path.abspath(__file__))\n\n return os.path.join(module_dir, templates_dir, tf)", "def runtime_config(self) -> str:\n return self._node[\"app_data\"].get(\"runtime_config\")", "def get_template_name(self):\n if self.template_name:\n return self.template_name\n\n if Path('_templates/global/WaitPage.html').exists():\n return 'global/WaitPage.html'\n return 'otree/WaitPage.html'", "def configuration_set_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"configuration_set_name\")", "def get_named_config(config_name: str = 'production'):\n if config_name in ['production', 'staging', 'default']:\n config = ProdConfig()\n elif config_name == 'testing':\n config = TestConfig()\n elif config_name == 'development':\n config = DevConfig()\n else:\n raise KeyError(f'Unknown configuration: {config_name}')\n return config", "def template_dir(self):\n return os.path.join(Config().template_dir(), 'platform')", "def name(self):\n return self.__class__.get_setting_name(self.key, **self.get_kwargs())", "def get_named_config(config_name: str = 'production'):\n if config_name in ['production', 'staging', 'default']:\n config = ProdConfig()\n elif config_name == 'testing':\n config = TestConfig()\n elif config_name == 'development':\n config = DevConfig()\n else:\n raise KeyError(f\"Unknown configuration '{config_name}'\")\n return config", "def _k8s_service_name(self):\n return \"{}-ssh-service\".format(self.app.name)", "def get(config_name):\n if config_name.lower() in GlobalConfig.__CONFIG__:\n return GlobalConfig.__CONFIG__[config_name.lower()]", "def getConfigFileName(self):\n return self._configFileName", "def get_branded_template(self, brand, template_name, deprecated_template_name):\n\n # If the deprecated setting is defined, return it.\n try:\n return self.cfg.get(*deprecated_template_name)\n except configparser.NoOptionError:\n pass\n\n # If a brand hint is provided, attempt to use it if it is valid.\n if brand:\n if brand not in self.valid_brands:\n brand = None\n\n # If the brand hint is not valid, or not provided, fallback to the default brand.\n if not brand:\n brand = self.cfg.get(\"general\", \"brand.default\")\n\n root_template_path = self.cfg.get(\"general\", \"templates.path\")\n\n # Grab jinja template if it exists\n if os.path.exists(\n os.path.join(root_template_path, brand, template_name + \".j2\")\n ):\n return os.path.join(brand, template_name + \".j2\")\n else:\n return os.path.join(root_template_path, brand, template_name)", "def stack_name(self) -> str:\n return jsii.get(self, \"stackName\")", "def name(self) -> str:\n return self.config_name or self.host_name or self.dev_id or DEVICE_DEFAULT_NAME", "def kafka_configuration_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"kafka_configuration_name\")", "def get_balancer_name(self):\n return '{}-{}'.format(\n self.config['namespace'],\n self.get_current_env(),\n )", "def container_name(self):\n prefix = 
get_service_prefix('core' if self.core else 'service')\n return f'{prefix}{self.data.get(\"name\")}'" ]
[ "0.6748205", "0.65170735", "0.65021986", "0.64853156", "0.6403302", "0.6382873", "0.6355259", "0.6352765", "0.6313939", "0.6291501", "0.6274407", "0.6263015", "0.62534225", "0.61411935", "0.6131308", "0.6107288", "0.6077706", "0.6076934", "0.6012232", "0.6004269", "0.5972697", "0.59527344", "0.5915369", "0.5903002", "0.5884017", "0.5869741", "0.58544135", "0.582657", "0.58240235", "0.580214", "0.57279867", "0.57277167", "0.57170767", "0.5651985", "0.5631418", "0.56199586", "0.55817556", "0.55736417", "0.5566159", "0.5565768", "0.5561328", "0.5533582", "0.5518216", "0.5517301", "0.551714", "0.55054337", "0.55038804", "0.5497479", "0.5489323", "0.54765606", "0.5458444", "0.54466426", "0.54381937", "0.5437714", "0.5436673", "0.5434237", "0.541376", "0.5411845", "0.5410064", "0.54095376", "0.54044765", "0.53905547", "0.5383216", "0.53566635", "0.53493273", "0.5344794", "0.53300136", "0.53231716", "0.5320817", "0.5310631", "0.5275727", "0.52680886", "0.5248806", "0.5248233", "0.52452064", "0.5232593", "0.5227593", "0.52189654", "0.5212605", "0.52054703", "0.52054703", "0.520477", "0.5204012", "0.5195742", "0.51896477", "0.51805", "0.5179747", "0.51728404", "0.517276", "0.5169071", "0.5167558", "0.51642215", "0.5162913", "0.5162131", "0.51618487", "0.5161578", "0.5160083", "0.51489216", "0.51307863", "0.5122262" ]
0.62606674
12
Specifies the tier to use in creating this environment. The environment tier that you choose determines whether Elastic Beanstalk provisions resources to support a web application that handles HTTP(S) requests or a web application that handles background-processing tasks.
def tier(self) -> Optional[pulumi.Input['EnvironmentTierArgs']]:
    return pulumi.get(self, "tier")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tier(self):\n\n if not hasattr(self, \"_tier\"):\n self._tier = self.opts.get(\"tier\")\n return self._tier", "def tier(self) -> pulumi.Output[Optional['outputs.EnvironmentTier']]:\n return pulumi.get(self, \"tier\")", "def set_tier(self, tier):\n self.single_selection_from_static_kendo_dropdown(self.tier_kendo_dropdown_locator, tier)", "def tier(self) -> Optional[pulumi.Input['InstanceTier']]:\n return pulumi.get(self, \"tier\")", "def tier(self) -> str:\n return pulumi.get(self, \"tier\")", "def tier(self) -> str:\n return pulumi.get(self, \"tier\")", "def tier(self) -> str:\n return pulumi.get(self, \"tier\")", "def get_tier(self) -> str:\n tier = self.raw_param.get(\"tier\")\n if not tier:\n return \"\"\n\n tierStr = tier.lower()\n if tierStr == CONST_MANAGED_CLUSTER_SKU_TIER_FREE and self._get_uptime_sla(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--uptime-sla\" and \"--tier free\" at the same time.'\n )\n\n if tierStr == CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD and self._get_no_uptime_sla(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--no-uptime-sla\" and \"--tier standard\" at the same time.'\n )\n\n return tierStr", "def tier(self, tier):\n\n self._tier = tier", "def tier(self) -> Optional[str]:\n return pulumi.get(self, \"tier\")", "def tier(self) -> Optional[str]:\n return pulumi.get(self, \"tier\")", "def tier(self) -> Optional[str]:\n return pulumi.get(self, \"tier\")", "def tier(self) -> Optional[str]:\n return pulumi.get(self, \"tier\")", "def tier(self) -> Optional[pulumi.Input[Union[str, 'CapacitySkuTier']]]:\n return pulumi.get(self, \"tier\")", "def tier(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tier\")", "def tier(self) -> Optional[pulumi.Input[Union[str, 'VCoreSkuTier']]]:\n return pulumi.get(self, \"tier\")", "def tier(self):\n return self._tier", "def AddTier(parser, is_patch=False):\n help_text = (\n \"Machine type for a shared-core instance e.g. ``db-g1-small''. \"\n 'For all other instances, instead of using tiers, customize '\n 'your instance by specifying its CPU and memory. You can do so '\n 'with the `--cpu` and `--memory` flags. 
Learn more about how '\n 'CPU and memory affects pricing: '\n 'https://cloud.google.com/sql/pricing.'\n )\n if is_patch:\n help_text += ' WARNING: Instance will be restarted.'\n\n parser.add_argument('--tier', '-t', required=False, help=help_text)", "def access_tier(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_tier\")", "def configure_tiers(self, datacenter, tier):\n print \"Enabling tier %s...\" % tier\n tiers = datacenter.listTiers()\n\n tiers[0].setName(tier)\n tiers[0].update()\n\n for i in range(1, 4):\n tiers[i].setEnabled(False)\n tiers[i].update()\n\n return tiers[0]", "def tier_number(self, tier_number):\n\n self._tier_number = tier_number", "def run_on_tier(self, tier, tierY=None):\n raise NotImplementedError", "def post(self, tier):\n\n if self._from_cluster:\n raise exception.OperationNotPermitted\n\n try:\n tier = tier.as_dict()\n LOG.debug(\"storage tier post dict= %s\" % tier)\n\n new_tier = _create(self, tier)\n except exception.SysinvException as e:\n LOG.exception(e)\n raise wsme.exc.ClientSideError(_(\"Invalid data: failed to create \"\n \"a storage tier object\"))\n\n return StorageTier.convert_with_links(new_tier)", "def tier(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"tier\")", "def price_tier(self):\n return self._safe_value(VAR_PRICETIER, str)", "def __init__(__self__, *,\n name: str,\n tier: str):\n pulumi.set(__self__, \"name\", name)\n pulumi.set(__self__, \"tier\", tier)", "def get_pvp_tier(self, region, namespace, tier_id, **filters):\n filters['namespace'] = namespace\n resource = 'data/wow/pvp-tier/{0}'\n return self.get_resource(resource, region, *[tier_id], **filters)", "def tier_explanation(self, tier_explanation):\n\n self._tier_explanation = tier_explanation", "def tier_2160p(self, tier_2160p):\n\n self._tier_2160p = tier_2160p", "def create(self, callback=None):\n\n parms = [{'budget': self.budget,\n 'deployment': {'deploymentId': self.deployment},\n 'description': self.description,\n 'name': self.name,\n 'minimumServers': self.minimum_servers,\n 'maximumServers': self.maximum_servers,\n 'breachIncrement': self.breach_increment,\n 'breachPeriodInMinutes': self.breach_period_in_minutes,\n 'cooldownPeriodInMinutes': self.cooldown_period_in_minutes,\n 'lowerCpuThreshold': self.lower_cpu_threshold,\n 'upperCpuThreshold': self.upper_cpu_threshold,\n 'lowerRamThreshold': self.lower_ram_threshold,\n 'upperRamThreshold': self.upper_ram_threshold}]\n\n payload = {'addTier':camel_keys(parms)}\n\n response=self.post(data=json.dumps(payload))\n if self.last_error is None:\n self.load()\n return response\n else:\n raise TierCreationException(self.last_error)", "async def addTier(self, ctx, tier):\n server_dict = self.get_server_dict(ctx)\n tierList = server_dict.setdefault(\"Tiers\", [])\n \n try:\n tierList.append(tier)\n self.save_data()\n await self.bot.say(\":white_check_mark: {0} added to tier list\".format(tier))\n except:\n await self.bot.say(\":x: Error adding {0} to the tier list\".format(tier))", "def GachaCraftNodeExcelAddTier(builder, Tier):\n return AddTier(builder, Tier)", "def patch(self, tier_uuid, patch):\n\n if self._from_cluster:\n raise exception.OperationNotPermitted\n\n LOG.debug(\"patch_data: %s\" % patch)\n\n rpc_tier = objects.storage_tier.get_by_uuid(pecan.request.context,\n tier_uuid)\n\n patch_obj = jsonpatch.JsonPatch(patch)\n backend = dict(name='*unknown*')\n for p in patch_obj:\n if p['path'] == '/backend_uuid':\n p['path'] = '/forbackendid'\n backend = 
objects.storage_backend.get_by_uuid(pecan.request.context,\n p['value'])\n p['value'] = backend.id\n elif p['path'] == '/cluster_uuid':\n p['path'] = '/forclusterid'\n cluster = objects.cluster.get_by_uuid(pecan.request.context,\n p['value'])\n p['value'] = cluster.id\n otier = copy.deepcopy(rpc_tier)\n\n # Validate provided patch data meets validity checks\n _pre_patch_checks(rpc_tier, patch_obj)\n\n try:\n tier = StorageTier(**jsonpatch.apply_patch(rpc_tier.as_dict(),\n patch_obj))\n except utils.JSONPATCH_EXCEPTIONS as e:\n raise exception.PatchError(patch=patch, reason=e)\n\n # Semantic Checks\n _check(self, \"modify\", tier.as_dict())\n try:\n # Update only the fields that have changed\n for field in objects.storage_tier.fields:\n if rpc_tier[field] != getattr(tier, field):\n rpc_tier[field] = getattr(tier, field)\n\n # Obtain the fields that have changed.\n delta = rpc_tier.obj_what_changed()\n if len(delta) == 0:\n raise wsme.exc.ClientSideError(\n _(\"No changes to the existing tier settings were detected.\"))\n\n allowed_attributes = ['name']\n for d in delta:\n if d not in allowed_attributes:\n raise wsme.exc.ClientSideError(\n _(\"Cannot modify '%s' with this operation.\" % d))\n\n LOG.info(\"SYS_I orig storage_tier: %s \" % otier.as_dict())\n LOG.info(\"SYS_I new storage_tier: %s \" % rpc_tier.as_dict())\n\n if 'name' in delta:\n default_tier_name = constants.SB_TIER_DEFAULT_NAMES[constants.SB_TIER_TYPE_CEPH]\n if rpc_tier.name == default_tier_name:\n raise wsme.exc.ClientSideError(\n _(\"Cannot modify tier '%s'. Name '%s' is used \"\n \"by the default tier\" % (otier.name, rpc_tier.name)))\n self._ceph.crushmap_tier_rename(otier.name, rpc_tier.name)\n\n # Save and return\n rpc_tier.save()\n return StorageTier.convert_with_links(rpc_tier)\n except (exception.HTTPNotFound, exception.CephFailure) as e:\n msg = _(\"Storage Tier update failed: backend %s storage tier %s : patch %s. 
\"\n \" Reason: %s\") % (backend['name'], otier['name'], patch, str(e))\n raise wsme.exc.ClientSideError(msg)", "def worker_tier_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"worker_tier_name\")", "def worker_tier_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"worker_tier_name\")", "def create(profile, name, application, cname=None, version=None,\n tier=\"web\", key_pair=None, instance_type=\"t1.micro\",\n instance_profile=None, service_role=None,\n healthcheck_url=None, security_groups=None,\n max_instances=1, min_instances=1, tags=None,\n vpc_id=None, subnets=None, db_subnets=None,\n elb_subnets=None, elb_scheme=None,\n public_ip=None, root_volume_size=None):\n client = boto3client.get(\"elasticbeanstalk\", profile)\n params = {}\n params[\"ApplicationName\"] = application\n params[\"EnvironmentName\"] = name\n if cname:\n params[\"CNAMEPrefix\"] = cname\n if version:\n params[\"VersionLabel\"] = version\n stack = utils.get_multicontainer_docker_solution_stack(profile)\n params[\"SolutionStackName\"] = stack \n if tier == \"web\":\n tier_definition = {\n \"Name\": \"WebServer\",\n \"Type\": \"Standard\",\n \"Version\": \"1.0\",\n }\n elif tier == \"worker\":\n tier_definition = {\n \"Name\": \"Worker\",\n \"Type\": \"SQS/HTTP\",\n \"Version\": \"1.0\",\n }\n else:\n raise Exception(\"tier must be 'web' or 'worker'\")\n params[\"Tier\"] = tier_definition\n if tags:\n params[\"Tags\"] = tags\n options = []\n if key_pair:\n key_pair_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"EC2KeyName\",\n \"Value\": key_pair,\n }\n options.append(key_pair_option)\n if instance_type:\n instance_type_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"InstanceType\",\n \"Value\": instance_type,\n }\n options.append(instance_type_option)\n if instance_profile:\n profile_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"IamInstanceProfile\",\n \"Value\": instance_profile,\n }\n options.append(profile_option)\n if service_role:\n role_option = {\n \"Namespace\": \"aws:elasticbeanstalk:environment\",\n \"OptionName\": \"ServiceRole\",\n \"Value\": service_role,\n }\n options.append(role_option)\n if healthcheck_url:\n healthcheck_url_option = {\n \"Namespace\": \"aws:elasticbeanstalk:application\",\n \"OptionName\": \"Application Healthcheck URL\",\n \"Value\": healthcheck_url,\n }\n options.append(healthcheck_url_option)\n if security_groups:\n security_groups_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"SecurityGroups\",\n \"Value\": \",\".join(security_groups),\n }\n options.append(security_groups_option)\n if min_instances:\n min_instances_option = {\n \"Namespace\": \"aws:autoscaling:asg\",\n \"OptionName\": \"MinSize\",\n \"Value\": str(min_instances),\n }\n options.append(min_instances_option)\n if max_instances:\n max_instances_option = {\n \"Namespace\": \"aws:autoscaling:asg\",\n \"OptionName\": \"MaxSize\",\n \"Value\": str(max_instances),\n }\n options.append(max_instances_option)\n if vpc_id:\n vpc_id_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"VPCId\",\n \"Value\": vpc_id,\n }\n options.append(vpc_id_option)\n if subnets:\n subnets_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"Subnets\",\n \"Value\": \",\".join(subnets),\n }\n options.append(subnets_option)\n if db_subnets:\n db_subnets_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n 
\"OptionName\": \"DBSubnets\",\n \"Value\": \",\".join(db_subnets),\n }\n options.append(db_subnets_option)\n if elb_subnets:\n elb_subnets_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"ELBSubnets\",\n \"Value\": \",\".join(elb_subnets),\n }\n options.append(elb_subnets_option)\n if elb_scheme:\n elb_scheme_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"ELBScheme\",\n \"Value\": elb_scheme,\n }\n options.append(elb_scheme_option)\n if public_ip:\n public_ip_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"AssociatePublicIpAddress\",\n \"Value\": str(public_ip),\n }\n options.append(public_ip_option)\n if root_volume_size:\n root_volume_size_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"RootVolumeSize\",\n \"Value\": str(root_volume_size),\n }\n options.append(root_volume_size_option)\n if options:\n params[\"OptionSettings\"] = options\n return client.create_environment(**params)", "def free_tier():\n return AccountTier.objects.get(id=1)", "def create_tier_from_file():\n parser = ArgumentParser(description=\"Tier JSON Descriptor\")\n if is_valid_file(parser,filename):\n f=open(filename,'r')\n json_object = json.load(f)\n\n new_tier = Tier()\n for value in json_object.values():\n for v in range(0,len(value)):\n new_tier.deployment=value[v]['deployment']['deploymentId']\n new_tier.description = value[v]['description']\n new_tier.name = value[v]['name']\n new_tier.budget = value[v]['budget']\n new_tier.minimum_servers = value[v]['minimumServers']\n new_tier.maximum_servers = value[v]['maximumServers']\n new_tier.breach_increment = value[v]['breachIncrement']\n new_tier.breach_period_in_minutes = value[v]['breachPeriodInMinutes']\n new_tier.cooldown_period_in_minutes = value[v]['cooldownPeriodInMinutes']\n new_tier.lower_cpu_threshold = value[v]['lowerCpuThreshold']\n new_tier.upper_cpu_threshold = value[v]['upperCpuThreshold']\n new_tier.lower_ram_threshold = value[v]['lowerRamThreshold']\n new_tier.upper_ram_threshold = value[v]['upperRamThreshold']\n #result=new_tier.create()\n #print new_tier.current_job", "def __init__(self, tier_2160p=None, tier_1440p=None, tier_1080p=None, tier_720p=None, tier_audio_only=None, local_vars_configuration=None): # noqa: E501 # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration.get_default_copy()\n self.local_vars_configuration = local_vars_configuration\n\n self._tier_2160p = None\n self._tier_1440p = None\n self._tier_1080p = None\n self._tier_720p = None\n self._tier_audio_only = None\n self.discriminator = None\n\n if tier_2160p is not None:\n self.tier_2160p = tier_2160p\n if tier_1440p is not None:\n self.tier_1440p = tier_1440p\n if tier_1080p is not None:\n self.tier_1080p = tier_1080p\n if tier_720p is not None:\n self.tier_720p = tier_720p\n if tier_audio_only is not None:\n self.tier_audio_only = tier_audio_only", "def tier_720p(self, tier_720p):\n\n self._tier_720p = tier_720p", "def __init__(__self__, *,\n name: Optional[pulumi.Input[Union[str, 'ManagedClusterSKUName']]] = None,\n tier: Optional[pulumi.Input[Union[str, 'ManagedClusterSKUTier']]] = None):\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if tier is not None:\n pulumi.set(__self__, \"tier\", tier)", "def __init__(__self__, *,\n name: pulumi.Input[str],\n capacity: Optional[pulumi.Input[int]] = None,\n tier: Optional[pulumi.Input[Union[str, 'VCoreSkuTier']]] = None):\n pulumi.set(__self__, \"name\", name)\n if capacity is not 
None:\n pulumi.set(__self__, \"capacity\", capacity)\n if tier is not None:\n pulumi.set(__self__, \"tier\", tier)", "def external_ip_egress_bandwidth_tier(self) -> Optional[pulumi.Input['NetworkPerformanceConfigExternalIpEgressBandwidthTier']]:\n return pulumi.get(self, \"external_ip_egress_bandwidth_tier\")", "def test_tiers_tier_level_tier_name_put(self):\n pass", "def tier_1080p(self, tier_1080p):\n\n self._tier_1080p = tier_1080p", "def __init__(__self__, *,\n external_ip_egress_bandwidth_tier: Optional[pulumi.Input['NetworkPerformanceConfigExternalIpEgressBandwidthTier']] = None,\n total_egress_bandwidth_tier: Optional[pulumi.Input['NetworkPerformanceConfigTotalEgressBandwidthTier']] = None):\n if external_ip_egress_bandwidth_tier is not None:\n pulumi.set(__self__, \"external_ip_egress_bandwidth_tier\", external_ip_egress_bandwidth_tier)\n if total_egress_bandwidth_tier is not None:\n pulumi.set(__self__, \"total_egress_bandwidth_tier\", total_egress_bandwidth_tier)", "def __init__(self, name_i, tier, size):\r\n self.name_i = name_i\r\n self.tier = tier\r\n self.size = size", "def get_one(self, tier_uuid):\n\n if self._from_cluster:\n raise exception.OperationNotPermitted\n\n rpc_tier = objects.storage_tier.get_by_uuid(pecan.request.context,\n tier_uuid)\n return StorageTier.convert_with_links(rpc_tier)", "def __init__(__self__, *,\n name: pulumi.Input[str],\n capacity: Optional[pulumi.Input[int]] = None,\n tier: Optional[pulumi.Input[Union[str, 'CapacitySkuTier']]] = None):\n pulumi.set(__self__, \"name\", name)\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)\n if tier is not None:\n pulumi.set(__self__, \"tier\", tier)", "def get_pvp_tier_index(self, region, namespace, **filters):\n filters['namespace'] = namespace\n resource = 'data/wow/pvp-tier/index'\n return self.get_resource(resource, region, **filters)", "def PRODUCTION(cls):\n\n return DataCenter.Environment(\"https://www.zohoapis.eu\", cls().get_iam_url(), cls().get_file_upload_url())", "def get_tier_config(self, tier_config_id):\n text, code = ApiClient(self._config, 'tier/configs/' + tier_config_id).get()\n return TierConfig.deserialize(text)", "def __init__(__self__, *,\n tier: str,\n email: Optional[str] = None,\n link: Optional[str] = None,\n name: Optional[str] = None):\n pulumi.set(__self__, \"tier\", tier)\n if email is not None:\n pulumi.set(__self__, \"email\", email)\n if link is not None:\n pulumi.set(__self__, \"link\", link)\n if name is not None:\n pulumi.set(__self__, \"name\", name)", "def get_env_type ( base_name ) :\n return base_name.split( '-', 1 )[ 0 ]", "def get_env_class(environment_type):\n if environment_type == \"vanilla\":\n return city.CityGridEnv\n elif environment_type == \"distraction\":\n return city.DistractionGridEnv\n elif environment_type == \"map\":\n return city.MapGridEnv\n elif environment_type == \"cooking\":\n return cooking.CookingGridEnv\n elif environment_type == \"miniworld_sign\":\n # Dependencies on OpenGL, so only load if absolutely necessary\n from envs.miniworld import sign\n return sign.MiniWorldSign\n else:\n raise ValueError(\n \"Unsupported environment type: {}\".format(environment_type))", "def train_tier(args: argparse.Namespace, hp: HParams, tier: int, extension_architecture: str,\n timestamp: str, tensorboardwriter: TensorboardWriter,\n logger: logging.Logger) -> None:\n logger.info(f\"Start training of tier {tier}/{hp.network.n_tiers}\")\n\n # Setup the data ready to be consumed\n train_dataloader, test_dataloader, num_samples = 
get_dataloader(hp)\n\n # Setup tier\n # Calculate size of FREQ dimension for this tier\n tier_freq = tierutil.get_size_freqdim_of_tier(n_mels=hp.audio.mel_channels,\n n_tiers=hp.network.n_tiers,\n tier=tier)\n\n if tier == 1:\n model = Tier1(tier=tier,\n n_layers=hp.network.layers[tier - 1],\n hidden_size=hp.network.hidden_size,\n gmm_size=hp.network.gmm_size,\n freq=tier_freq)\n else:\n model = Tier(tier=tier,\n n_layers=hp.network.layers[tier - 1],\n hidden_size=hp.network.hidden_size,\n gmm_size=hp.network.gmm_size,\n freq=tier_freq)\n model = model.to(hp.device)\n model.train()\n\n # Setup loss criterion and optimizer\n criterion = GMMLoss()\n optimizer = torch.optim.RMSprop(params=model.parameters(),\n lr=hp.training.lr,\n momentum=hp.training.momentum)\n\n # Check if training has to be resumed from previous checkpoint\n if args.checkpoint_path is not None:\n model, optimizer = resume_training(args, hp, tier, model, optimizer, logger)\n else:\n logger.info(f\"Starting new training on dataset {hp.data.dataset} with configuration file \"\n f\"name {hp.name}\")\n\n # Train the tier\n total_iterations = 0\n loss_logging = 0 # accumulated loss between logging iterations\n loss_save = 0 # accumulated loss between saving iterations\n prev_loss_onesample = 1e8 # used to compare between saving iterations and decide whether or not\n # to save the model\n\n for epoch in range(hp.training.epochs):\n logger.info(f\"Epoch: {epoch}/{hp.training.epochs} - Starting\")\n for i, (waveform, utterance) in enumerate(train_dataloader):\n\n # 1.1 Transform waveform input to melspectrogram and apply preprocessing to normalize\n waveform = waveform.to(device=hp.device, non_blocking=True)\n spectrogram = transforms.wave_to_melspectrogram(waveform, hp)\n spectrogram = audio_normalizing.preprocessing(spectrogram, hp)\n # 1.2 Get input and output from the original spectrogram for this tier\n input_spectrogram, output_spectrogram = tierutil.split(spectrogram=spectrogram,\n tier=tier,\n n_tiers=hp.network.n_tiers)\n length_spectrogram = input_spectrogram.size(2)\n # 2. Clear the gradients\n optimizer.zero_grad()\n # 3. Compute the model output\n if tier == 1:\n # generation is unconditional so there is only one input\n mu_hat, std_hat, pi_hat = model(spectrogram=input_spectrogram)\n else:\n # generation is conditional on the spectrogram generated by previous tiers\n mu_hat, std_hat, pi_hat = model(spectrogram=output_spectrogram,\n spectrogram_prev_tier=input_spectrogram)\n # 4. Calculate the loss\n loss = criterion(mu=mu_hat, std=std_hat, pi=pi_hat, target=output_spectrogram)\n del spectrogram\n del mu_hat, std_hat, pi_hat\n\n # 4.1 Check if loss has exploded\n if torch.isnan(loss) or torch.isinf(loss):\n error_msg = f\"Loss exploded at Epoch: {epoch}/{hp.training.epochs} - \" \\\n f\"Iteration: {i * hp.training.batch_size}/{num_samples}\"\n logger.error(error_msg)\n raise Exception(error_msg)\n\n # 5. Perform backpropagation\n loss_cpu = loss.item()\n loss.backward()\n optimizer.step()\n\n # 6. 
Logging and saving model\n loss_oneframe = loss_cpu / (length_spectrogram * hp.training.batch_size)\n loss_logging += loss_oneframe # accumulated loss between logging iterations\n loss_save += loss_oneframe # accumulated loss between saving iterations\n\n # 6.1 Save model (if is better than previous tier)\n if (total_iterations + 1) % hp.training.save_iterations == 0:\n # Calculate average loss of one sample of a batch\n loss_onesample = int(loss_save / hp.training.save_iterations)\n # if loss_onesample of these iterations is lower, the tier is better and we save it\n if loss_onesample < prev_loss_onesample:\n path = f\"{hp.training.dir_chkpt}/tier{tier}_{timestamp}_loss{loss_onesample}.pt\"\n torch.save(obj={'dataset': hp.data.dataset,\n 'tier_idx': tier,\n 'hp': hp,\n 'epoch': epoch,\n 'iterations': i,\n 'total_iterations': total_iterations,\n 'tier': model.state_dict(),\n 'optimizer': optimizer.state_dict()}, f=path)\n logger.info(f\"Model saved to: {path}\")\n prev_loss_onesample = loss_onesample\n loss_save = 0\n\n # 6.2 Logging\n if (total_iterations + 1) % hp.logging.log_iterations == 0:\n # Calculate average loss of one sample of a batch\n loss_onesample = int(loss_logging / hp.logging.log_iterations)\n tensorboardwriter.log_training(hp, loss_onesample, total_iterations)\n logger.info(f\"Epoch: {epoch}/{hp.training.epochs} - \"\n f\"Iteration: {i * hp.training.batch_size}/{num_samples} - \"\n f\"Loss: {loss_onesample}\")\n loss_logging = 0\n\n # 6.3 Evaluate\n if (total_iterations + 1) % hp.training.evaluation_iterations == 0:\n evaluation(hp, tier, test_dataloader, model, criterion, logger)\n total_iterations += 1\n\n # After finishing training: save model, hyperparameters and total loss\n path = f\"{hp.training.dir_chkpt}/tier{tier}_{timestamp}_final.pt\"\n torch.save(obj={'dataset': hp.data.dataset,\n 'tier_idx': tier,\n 'hp': hp,\n 'epoch': epoch,\n 'iterations': evaluation(hp, tier, test_dataloader, model, criterion,\n logger),\n 'total_iterations': total_iterations,\n 'tier': model.state_dict(),\n 'optimizer': optimizer.state_dict()}, f=path)\n logger.info(f\"Model saved to: {path}\")\n tensorboardwriter.log_end_training(hp=hp, loss=-1)\n logger.info(\"Finished training\")", "def __init__(__self__, *,\n capacity: Optional[pulumi.Input[int]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tier: Optional[pulumi.Input[str]] = None):\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if tier is not None:\n pulumi.set(__self__, \"tier\", tier)", "def test_tiers_tier_level_tier_name_get(self):\n pass", "def setUp(self) -> None:\n self.ec2 = boto3.resource('ec2')\n self.ec2_client = boto3.client('ec2')\n self.sts = boto3.client('sts')\n self.iam = boto3.client('iam')\n self.autoscaling = boto3.client('autoscaling')\n\n self.prod_env = prod_env", "def tier_1440p(self, tier_1440p):\n\n self._tier_1440p = tier_1440p", "def _delete(self, tier_uuid):\n\n tier = objects.storage_tier.get_by_uuid(pecan.request.context, tier_uuid)\n\n # Semantic checks\n _check(self, \"delete\", tier.as_dict())\n\n # update the crushmap by removing the tier\n try:\n self._ceph.crushmap_tier_delete(tier.name)\n except exception.CephCrushMapNotApplied:\n # If crushmap has not been applied then there is no rule to update.\n pass\n\n try:\n pecan.request.dbapi.storage_tier_destroy(tier.id)\n except exception.HTTPNotFound:\n msg = _(\"Failed to delete storage tier %s.\" % tier.name)\n raise wsme.exc.ClientSideError(msg)", 
"def test_add_tier(self, mock_client):\n\n productRelease = collections.OrderedDict([(u'productName', PRODUCT1),\n (u'version', VERSION)])\n tierDto = collections.OrderedDict([(u'name', \"TIER\"), (u'flavour', \"flavour\"),\n (u'image', \"image\"),\n (u'productReleaseDtos', productRelease)])\n template = Template(TEMPLATE_NAME, TEMPLATE_DESCRIPTION)\n template.template_id = \"ID\"\n\n class Object(object):\n pass\n newtemplate = Object()\n newtemplate.id = \"ID\"\n\n mock_client.create_env_template.return_value = newtemplate\n mock_client.get_image_name.return_value = \"image\"\n Config.Clients = mock_client\n\n template.add_tiers(tierDto)\n self.assertEquals(len(template.tiers), 1)", "def total_egress_bandwidth_tier(self) -> Optional[pulumi.Input['NetworkPerformanceConfigTotalEgressBandwidthTier']]:\n return pulumi.get(self, \"total_egress_bandwidth_tier\")", "def _env_switch(environment: str, prod_value: T, qa_value: T) -> T:\n if environment == PROD:\n return prod_value\n return qa_value", "def __init__(__self__, *,\n resource_group: pulumi.Input[str],\n access_tier: Optional[pulumi.Input[str]] = None,\n data_lake_enabled: Optional[pulumi.Input[bool]] = None,\n kind: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n network_rule: Optional[pulumi.Input['StorageAccountSpecNetworkRuleArgs']] = None,\n sku: Optional[pulumi.Input['StorageAccountSpecSkuArgs']] = None,\n supports_https_traffic_only: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"resource_group\", resource_group)\n if access_tier is not None:\n pulumi.set(__self__, \"access_tier\", access_tier)\n if data_lake_enabled is not None:\n pulumi.set(__self__, \"data_lake_enabled\", data_lake_enabled)\n if kind is not None:\n pulumi.set(__self__, \"kind\", kind)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if network_rule is not None:\n pulumi.set(__self__, \"network_rule\", network_rule)\n if sku is not None:\n pulumi.set(__self__, \"sku\", sku)\n if supports_https_traffic_only is not None:\n pulumi.set(__self__, \"supports_https_traffic_only\", supports_https_traffic_only)", "def set_mode(self, pardus_profile):\n\n #TODO: How to determine mode (adhoc or infrastructure) from old profile settings\n return \"infrastructure\"", "def _staging():\n env.environment = 'staging'\n env.server_name = 'project-staging.dimagi.com'\n env.hosts = [settings.STAGING_HOST]", "def tier_trading(self, item):\r\n\r\n # Initial tier is the item the user has going into the store and final\r\n # tier is the item the user has when leaving the store\r\n initial_tier = self.item.tier\r\n final_tier = item.tier\r\n\r\n # Not allowing items that are too large to be carried\r\n if item.size is False:\r\n self.add = False\r\n print(\"The\", item.name_i, \"is too big to carry around the mall.\" +\r\n \"\\nPlease select a different item.\\n\\nAfter you have\" +\r\n \" checked all items, if no item of the proper tier\" +\r\n \" exists\\nplease type [4] to leave the store.\")\r\n\r\n # Standard jumping of tier trading and checking to make sure the final\r\n # tier is one tier higher than the initial tier\r\n elif final_tier == initial_tier + 1:\r\n self.add = True\r\n\r\n # Jumping exceptions; if the initial item is earrings, that can jump\r\n # to purse, and if the initial item is iPod_Shuffle, that can jump\r\n # to Air_Jordan_Space_Jam_11\r\n elif self.item.name_i == 'Earrings' and item.name_i == 'Purse':\r\n self.add = True\r\n print(\"You have hit a jumping exception and get to skip 
a tier!\")\r\n\r\n elif (self.item.name_i == 'iPod_Shuffle' and\r\n item.name_i == 'Air_Jordan_Space_Jam_11'):\r\n self.add = True\r\n print(\"You have hit a jumping exception and get to skip a tier!\")\r\n\r\n # If the tier is not acceptable we have to set self.add back to False\r\n else:\r\n self.add = False\r\n print(\"You are not allowed to select items in that tier.\"\r\n \"\\n\\nPlease pick another item one tier higher than your\" +\r\n \" current tier.\\n\\nAfter you have checked all items,\" +\r\n \" if no item of the proper tier exists,\\nplease type [4]\" +\r\n \" to leave the store.\")", "def __init__(__self__, *,\n capacity: Optional[pulumi.Input[int]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tier: Optional[pulumi.Input[str]] = None):\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)\n if name is None:\n name = 'S0'\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if tier is None:\n tier = 'Standard'\n if tier is not None:\n pulumi.set(__self__, \"tier\", tier)", "def _get_tier(pkmn_id):\n if pkmn_id in tiers.TIERS[\"0\"]:\n return 0\n elif pkmn_id in tiers.TIERS[\"1\"]:\n return 1\n elif pkmn_id in tiers.TIERS[\"2\"]:\n return 2\n elif pkmn_id in tiers.TIERS[\"3\"]:\n return 3\n else:\n return 4", "def deployment_environment(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"deployment_environment\")", "def get_current_environment(self):\n for env in self.indicators:\n if self._is_env_indicator_in_url(self.indicators[env]):\n return env\n\n return Environment.PRODUCTION", "def _get_environment():\n namespace = current_app.config.get('POD_NAMESPACE').lower()\n if namespace.endswith('dev'):\n return 'DEV'\n if namespace.endswith('test'):\n return 'TEST'\n if namespace.endswith('tools'):\n return 'SANDBOX'\n return ''", "def __init__(__self__, *,\n disabled: Optional[pulumi.Input[bool]] = None,\n load_balancer_type: Optional[pulumi.Input['CloudRunConfigLoadBalancerType']] = None):\n if disabled is not None:\n pulumi.set(__self__, \"disabled\", disabled)\n if load_balancer_type is not None:\n pulumi.set(__self__, \"load_balancer_type\", load_balancer_type)", "def dl_tier(self, tier):\n\n tier_df = pd.DataFrame()\n\n for t in self.tier_tables[tier]:\n\n for y in self.years:\n\n df = get_GHGRP_records(y, t)\n\n tier_df = tier_df.append(df, sort=True, ignore_index=True)\n\n tier_df.columns = [x.lower() for x in tier_df.columns]\n\n # Fix issues with natural gas HHV reporting\n # Other fuel HHVs were exammined manually. There's a wide range for\n # wood and wood residuals, but not other fuels.\n if tier == 't2_hhv':\n\n tier_df['high_heat_value'] = \\\n tier_df.high_heat_value.astype('float32')\n\n natgas_st_index = tier_df[\n (tier_df.fuel_type == 'Natural Gas (Weighted U.S. Average)') &\n (tier_df.high_heat_value_uom == 'mmBtu/short ton')\n ].index\n\n tier_df.loc[natgas_st_index, 'high_heat_value_uom'] = 'mmBtu/scf'\n\n m_index = tier_df[\n (tier_df.fuel_type == 'Natural Gas (Weighted U.S. Average)') &\n (tier_df.high_heat_value.between(1, 1.2))\n ].index\n\n tier_df.high_heat_value.update(\n tier_df.loc[m_index, 'high_heat_value'].divide(1000)\n )\n\n drop_index = tier_df[\n (tier_df.fuel_type == 'Natural Gas (Weighted U.S. 
Average)') &\n (tier_df.high_heat_value.between(0.0012, 0.0014))\n ].index\n\n tier_df = tier_df[~tier_df.index.isin(drop_index)]\n\n return tier_df", "def production_settings_name():\n if hasattr(SettingsType, 'AWS'):\n # Hawthorn and Ironwood\n return getattr(SettingsType, 'AWS')\n else:\n # Juniper and beyond.\n return getattr(SettingsType, 'PRODUCTION')", "def create_pool(self, device, tier, poolname):\n print \"Adding pool %s...\" % poolname\n pool = device.findRemoteStoragePool(StoragePoolPredicates.name(poolname))\n pool.setTier(tier)\n pool.save()\n return pool", "def _production():\n env.environment = 'production'\n env.server_name = 'project-production.dimagi.com'\n env.hosts = [settings.PRODUCTION_HOST]", "def type(self):\n return EB.EnvType.ROBOSUITE_TYPE", "def tiers(self, args):\n parser = OptionParser(usage=\"vdc tiers <options>\")\n parser.add_option(\"-n\", \"--name\",\n help=\"The name of the virtual datacenter\", dest=\"name\")\n (options, args) = parser.parse_args(args)\n name = options.name\n if not name:\n parser.print_help()\n return\n\n # Once user input has been read, find the virtual datacenter\n try:\n cloud = self._context.getCloudService()\n vdc = cloud.findVirtualDatacenter(\n VirtualDatacenterPredicates.name(name))\n if vdc:\n tiers = vdc.listStorageTiers()\n pprint_tiers(tiers)\n else:\n print \"No virtual datacenter found with name: %s\" % name\n except (AbiquoException, AuthorizationException), ex:\n print \"Error: %s\" % ex.getMessage()", "def _create_deployment(self) -> aws.apigateway.Stage:\n deployment = aws.apigateway.Deployment(\n f\"{self.rest_api._name}-deployment\",\n rest_api=self.rest_api.id,\n # TODO: Still want to have a triggers function\n opts=pulumi.ResourceOptions(\n parent=self, depends_on=[p.lambda_integration for p in self.proxies]\n ),\n )\n\n stage = aws.apigateway.Stage(\n f\"{self.rest_api._name}-prod-stage\",\n deployment=deployment.id,\n rest_api=self.rest_api.id,\n stage_name=\"prod\",\n opts=pulumi.ResourceOptions(parent=self),\n )\n\n return stage", "def _get_environment(cls):\n return cls.__name__.lower()", "def create_loadbalancer(self, context, lb):\n super(ArrayDeviceDriverV2, self).create_loadbalancer(context, lb)\n deployment_model = self._get_setting(\n lb.tenant_id, \"lbaas_settings\", \"deployment_model\"\n )\n if deployment_model == \"PER_LOADBALANCER\":\n self.update_loadbalancer(context, lb, None)", "def get_environment_class_by_name(environment_type):\n for cls in util.iter_subclasses(Environment):\n if cls.tool_name == environment_type:\n return cls\n raise EnvironmentUnavailable(\n f\"Unknown environment type '{environment_type}'\")", "def __init__(__self__, *,\n application_name: pulumi.Input[str],\n cname_prefix: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n environment_name: Optional[pulumi.Input[str]] = None,\n operations_role: Optional[pulumi.Input[str]] = None,\n option_settings: Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentOptionSettingArgs']]]] = None,\n platform_arn: Optional[pulumi.Input[str]] = None,\n solution_stack_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentTagArgs']]]] = None,\n template_name: Optional[pulumi.Input[str]] = None,\n tier: Optional[pulumi.Input['EnvironmentTierArgs']] = None,\n version_label: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"application_name\", application_name)\n if cname_prefix is not None:\n pulumi.set(__self__, \"cname_prefix\", 
cname_prefix)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if environment_name is not None:\n pulumi.set(__self__, \"environment_name\", environment_name)\n if operations_role is not None:\n pulumi.set(__self__, \"operations_role\", operations_role)\n if option_settings is not None:\n pulumi.set(__self__, \"option_settings\", option_settings)\n if platform_arn is not None:\n pulumi.set(__self__, \"platform_arn\", platform_arn)\n if solution_stack_name is not None:\n pulumi.set(__self__, \"solution_stack_name\", solution_stack_name)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if template_name is not None:\n pulumi.set(__self__, \"template_name\", template_name)\n if tier is not None:\n pulumi.set(__self__, \"tier\", tier)\n if version_label is not None:\n pulumi.set(__self__, \"version_label\", version_label)", "def isolationforest(data_set, types):\n if types == 'Global':\n clf = IsolationForest(random_state=19,n_estimators=200)\n else:\n clf = IForest(random_state=10,n_estimators=200)\n clf.fit(data_set)\n pred = clf.predict(data_set)\n return pred", "def set_env_config(self):\n self.env_config = {\n # ===== STANDARD ARGUMENTS ======\n \"n_agents\": 4, # Number of non-planner agents\n \"world_size\": [15, 15], # [Height, Width] of the env world\n \"episode_length\": 1000, # Number of time-steps per episode\n # In multi-action-mode, the policy selects an action for each action\n # subspace (defined in component code)\n # Otherwise, the policy selects only 1 action\n \"multi_action_mode_agents\": False,\n \"multi_action_mode_planner\": True,\n # When flattening observations, concatenate scalar & vector observations\n # before output\n # Otherwise, return observations with minimal processing\n \"flatten_observations\": False,\n # When Flattening masks, concatenate each action subspace mask\n # into a single array\n # Note: flatten_masks = True is recommended for masking action logits\n \"flatten_masks\": True,\n # ===== COMPONENTS =====\n # Which components to use\n \"components\": [\n # (1) Building houses\n {\"Build\": {}},\n # (2) Trading collectible resources\n {\"ContinuousDoubleAuction\": {\"max_num_orders\": 5}},\n # (3) Movement and resource collection\n {\"Gather\": {}},\n ],\n # ===== SCENARIO =====\n # Which scenario class to use\n \"scenario_name\": \"uniform/simple_wood_and_stone\",\n # (optional) kwargs of the chosen scenario class\n \"starting_agent_coin\": 10,\n \"starting_stone_coverage\": 0.10,\n \"starting_wood_coverage\": 0.10,\n }\n\n # Create an environment instance from the config\n self.env = foundation.make_env_instance(**self.env_config)", "def setup_test_tenant(self):\n self.test_tenant = rand_name('test_tenant_')\n self.test_description = rand_name('desc_')\n resp, self.tenant = self.client.create_tenant(\n name=self.test_tenant,\n description=self.test_description)\n self.tenants.append(self.tenant)", "def __init__(self, environment=None):\n if environment is None:\n environment = os.environ.get(\"SENTERA_ENV\") or \"prod\"\n environment = environment.lower()\n self.environment = environment\n\n if self.environment == \"prod\":\n self.config = {\n \"sentera_api_url\": \"https://api.sentera.com\",\n \"weather_api_url\": \"https://weather.sentera.com\",\n }\n else:\n self.config = {\n \"sentera_api_url\": f\"https://api{self.environment}.sentera.com\",\n \"weather_api_url\": f\"https://weather{self.environment}.sentera.com\",\n }\n\n if ENV_SENTERA_API_URL in os.environ:\n self.config[\"sentera_api_url\"] 
= os.environ.get(ENV_SENTERA_API_URL)\n\n if ENV_WEATHER_API_URL in os.environ:\n self.config[\"weather_api_url\"] = os.environ.get(ENV_WEATHER_API_URL)", "def detail(self, tier_uuid=None, marker=None, limit=None,\n sort_key='id', sort_dir='asc'):\n\n parent = pecan.request.path.split('/')[:-1][-1]\n if parent != 'storage_tiers':\n raise exception.HTTPNotFound\n\n expand = True\n resource_url = '/'.join(['storage_tiers', 'detail'])\n return self._get_tiers_collection(tier_uuid, marker, limit,\n sort_key, sort_dir, expand,\n resource_url)", "def test_create_storage_tiered_rate(self):\n storage_rates = (\n metric_constants.OCP_METRIC_STORAGE_GB_REQUEST_MONTH,\n metric_constants.OCP_METRIC_STORAGE_GB_USAGE_MONTH,\n )\n for storage_rate in storage_rates:\n ocp_data = {\n \"name\": \"Test Cost Model\",\n \"description\": \"Test\",\n \"source_type\": Provider.PROVIDER_OCP,\n \"providers\": [{\"uuid\": self.provider.uuid, \"name\": self.provider.name}],\n \"rates\": [\n {\n \"metric\": {\"name\": storage_rate},\n \"tiered_rates\": [\n {\"unit\": \"USD\", \"value\": 0.22, \"usage\": {\"usage_start\": None, \"usage_end\": 10.0}},\n {\"unit\": \"USD\", \"value\": 0.26, \"usage\": {\"usage_start\": 10.0, \"usage_end\": None}},\n ],\n }\n ],\n \"currency\": \"USD\",\n }\n\n with tenant_context(self.tenant):\n instance = None\n serializer = CostModelSerializer(data=ocp_data, context=self.request_context)\n if serializer.is_valid(raise_exception=True):\n instance = serializer.save()\n self.assertIsNotNone(instance)\n self.assertIsNotNone(instance.uuid)", "def run_on_tier(self, tier, tier_y=None):\n if tier_y is None:\n return None\n\n logging.info(\"Apply sppasFilter() on tier: {:s}\".format(tier.get_name()))\n sfilter = sppasTierFilters(tier)\n\n logging.debug(\"Data in RelationFilterProcess: {:s}\".format(self.data))\n ann_set = sfilter.rel(tier_y,\n *(self.data[0]),\n **{self.data[1][i][0]: self.data[1][i][1] for i in range(len(self.data[1]))})\n\n # convert the annotations set into a tier\n filtered_tier = ann_set.to_tier(name=self.tier_name,\n annot_value=self.annot_format)\n\n return filtered_tier", "def create_infrastructure_storage(config, context, dc):\n print \"### Configuring storage ###\"\n storage = InfrastructureStorage(context)\n tier = storage.configure_tiers(dc, config.get(\"tier\", \"name\"))\n try: \n user = config.get(\"device\", \"user\")\n password= config.get(\"device\", \"password\")\n except NoOptionError:\n user = None\n password = None\n device = storage.create_device(dc, config.get(\"device\", \"name\"),\n StorageTechnologyType.valueOf(config.get(\"device\", \"type\")),\n config.get(\"device\", \"address\"),\n config.get(\"device\", \"address\"),\n user, password)\n\n storage.create_pool(device, tier, config.get(\"pool\", \"name\"))", "def pricing_tiers(self, pricing_tiers):\n\n self._pricing_tiers = pricing_tiers", "def production():\n env.run = run\n env.cd = cd\n env.deployment = 'remote'", "def set_sizing_environment():\n # Creates a sizing executor factory to output communication cost\n # after the training finishes. Note that sizing executor only provides an\n # estimate (not exact) of communication cost, and doesn't capture cases like\n # compression of over-the-wire representations. 
However, it's perfect for\n # demonstrating the effect of compression in this tutorial.\n sizing_factory = tff.framework.sizing_executor_factory()\n\n # TFF has a modular runtime you can configure yourself for various\n # environments and purposes, and this example just shows how to configure one\n # part of it to report the size of things.\n context = tff.framework.ExecutionContext(executor_fn=sizing_factory)\n tff.framework.set_default_context(context)\n\n return sizing_factory", "def delete(self, tier_uuid):\n\n if self._from_cluster:\n raise exception.OperationNotPermitted\n\n _delete(self, tier_uuid)", "def get_environment():\n # Auto-set settings object based on App Engine dev environ\n if 'SERVER_SOFTWARE' in os.environ:\n if os.environ['SERVER_SOFTWARE'].startswith('Dev'):\n return Config.ENV_LOCAL\n elif os.environ['SERVER_SOFTWARE'].startswith('Google App Engine/'):\n #For considering an environment staging we assume the version id\n # contains -staging and the URL\n current_version_id = str(os.environ['CURRENT_VERSION_ID']) if (\n 'CURRENT_VERSION_ID') in os.environ else ''\n if '-staging' in current_version_id:\n return Config.ENV_STAGING\n #If not local or staging then is production TODO: really?\n return Config.ENV_PRODUCTION\n return Config.ENV_LOCAL", "def environment(self, environment):\n\n self._set_field(\"environment\", environment.get_json())", "def get_env(self):\n self.airflow_cluster_name = conf.get('core', 'cluster')\n bicommon = BICommon()\n self.env_type = bicommon.env\n\n self.parameters.update({'airflow_cluster_name': self.airflow_cluster_name, 'env': self.env_type})" ]
[ "0.69780785", "0.68621033", "0.65023184", "0.6452587", "0.6355082", "0.6355082", "0.6355082", "0.62773496", "0.62122846", "0.6167871", "0.6167871", "0.6167871", "0.6167871", "0.6113269", "0.6034399", "0.60080045", "0.598394", "0.5843236", "0.5833414", "0.5829682", "0.5780073", "0.57668823", "0.5703262", "0.5683552", "0.56024295", "0.5565308", "0.55319", "0.5365069", "0.53202933", "0.52909493", "0.51845306", "0.516346", "0.5017035", "0.49587128", "0.49587128", "0.49086514", "0.4875058", "0.4850388", "0.48449057", "0.47659385", "0.47656587", "0.47636816", "0.47338787", "0.46951714", "0.46942288", "0.4672011", "0.46354562", "0.4617859", "0.46135455", "0.46046597", "0.45892385", "0.45726937", "0.45664883", "0.45529157", "0.45289317", "0.45262626", "0.44977087", "0.44969457", "0.4496933", "0.44914123", "0.44186178", "0.44092256", "0.43937883", "0.43839732", "0.4368276", "0.43540075", "0.43488753", "0.43439138", "0.43395057", "0.4317747", "0.42800385", "0.42511585", "0.42467698", "0.42383313", "0.42326152", "0.42193335", "0.4218103", "0.4213811", "0.4208186", "0.41816798", "0.417953", "0.41623065", "0.41527703", "0.41489965", "0.41475275", "0.41428354", "0.41386175", "0.41382325", "0.4137227", "0.41338736", "0.41287598", "0.41243827", "0.41230252", "0.41215703", "0.41143405", "0.40999612", "0.40915263", "0.40786305", "0.40771934", "0.4064519" ]
0.7524803
0
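For context on the tier getter above, here is a minimal illustrative sketch of how such an environment could be declared. It is not part of the dataset rows; it assumes the AWS Native Pulumi provider's elasticbeanstalk module exposes Environment and EnvironmentTierArgs with name, type, and version fields mirroring the CloudFormation Tier property, and the application and solution-stack names are placeholders.

import pulumi_aws_native as aws_native

# Illustrative sketch only; EnvironmentTierArgs field names are assumed to
# mirror the CloudFormation Tier property (Name/Type/Version).
# A "Worker" tier processes background jobs via SQS/HTTP instead of serving
# HTTP(S) requests, which a "WebServer" tier would handle.
worker_env = aws_native.elasticbeanstalk.Environment(
    "worker-env",
    application_name="my-app",  # placeholder application name
    solution_stack_name="64bit Amazon Linux 2 v3.5.9 running Python 3.8",  # placeholder stack
    tier=aws_native.elasticbeanstalk.EnvironmentTierArgs(
        name="Worker",
        type="SQS/HTTP",
        version="1.0",
    ),
)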
The name of the application version to deploy.
def version_label(self) -> Optional[pulumi.Input[str]]:
    return pulumi.get(self, "version_label")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def version_name(self) -> str:\n return pulumi.get(self, \"version_name\")", "def app_version(self) -> str:\n return pulumi.get(self, \"app_version\")", "def version_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version_name\")", "def get_package_name(self):\n return self.name + '-' + self.version", "def name(self):\n return _version._NAME # pylint: disable=protected-access", "def get_name():\n return config.APP_NAME", "def get_version(self):\n return self.cur_config['version']['name']", "def get_version_name(self):\n\t\tif self.have_metadata is False:\n\t\t\tself._get_metadata()\n\t\t\tself.have_metadata = True\n\n\t\ttry:\n\t\t\treturn self.keyinfo['context_tags'].attrs['version_name']\n\t\texcept:\n\t\t\treturn None", "def get_package_name(self):\n return self.name + '-' + self.version + '-' + self.release", "def app_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"app_name\")", "def get_res_name():\n return os.getenv(\"RESOURCES_VERSION\", \"res_0.0\")", "def getApplicationReleaseName(self) -> unicode:\n ...", "def application_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_name\")", "def get_version_tag(self, version: str) -> str:\n return version", "def getApplicationVersion(self) -> unicode:\n ...", "def _app_id(self):\n return '{}-{}'.format(self.config['app']['name'],\n self.config['app']['version'])", "def get_version(cls):\n if Config.ENV_TYPE == PRD:\n return Config.version + \"/\" + Config.build\n return Config.version + \"/\" + Config.build + \"/\" + Config.generate + ' (' + Config.ENV_NAME + ')'", "def app_name(self) -> str:\n return self._app_name", "def fullname(self):\n return \"{project}/{version}\".format(\n project=self.project.name, version=self.name\n )", "def get_version(self):\n data = self._get('app_version')\n return data['version']", "def _branch_name(cls, version: Version) -> str:\n suffix = version.public[len(version.base_version) :]\n components = version.base_version.split(\".\") + [suffix]\n if suffix != \"\" and not (\n suffix.startswith(\"rc\")\n or suffix.startswith(\"a\")\n or suffix.startswith(\"b\")\n or suffix.startswith(\".dev\")\n ):\n raise ValueError(f\"Unparseable pants version number: {version}\")\n return \"{}.{}.x\".format(*components[:2])", "def print_app_version(app_name):\n print_file('{}/current/version.txt'.format(get_app_basedir(app_name)))", "def get_version(self) -> str:\n return versioning.get_version()", "def app_name(self):\n return self._app_name", "def get_application_version(self):\n return self.connector.request('GET', '/app/version')", "def kms_key_version_name(self) -> str:\n return pulumi.get(self, \"kms_key_version_name\")", "def app_name(self): # pylint:disable=function-redefined\n return self._app_name", "def application_name(self) -> Optional[str]:\n return pulumi.get(self, \"application_name\")", "def app_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_name\")", "def version(self) -> str:\n return pulumi.get(self, \"version\")", "def version(self) -> str:\n return pulumi.get(self, \"version\")", "def version(self) -> str:\n return pulumi.get(self, \"version\")", "def version(self) -> str:\n return pulumi.get(self, \"version\")", "def version() -> str:\n with open(join(dirname(__file__), 'resources', 'VERSION')) as f:\n return f.read().strip()", "def name(self):\n return self.application_tree['name']", "def 
name(self):\n\n return self.manifest[\"name\"]", "def product(self):\n return self.appName", "def get_version():\n return \".\".join([str(i) for i in config[\"version\"]])", "def version(self) -> str:\n return '0.1'", "def version(self):\n return self.proto.details.appDetails.versionString", "def version(self):\n\n return self.manifest[\"version\"]", "def get_version():\n return '.'.join(map(str, VERSION))", "def path_name(self):\n return u'{0}-{1}'.format(self.plugin.name, self._major_version)", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version():\n from app import get_version\n\n return render_template(\"version.html\", version=get_version())", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")", "def getApplicationName(self) -> unicode:\n ...", "def get_raw_server_name():\n from google.appengine.api import app_identity\n return '%s.%s.appspot.com' % (os.environ[\n 'CURRENT_VERSION_ID'].split('.')[0], app_identity.get_application_id())", "def app_version_id(self):\n return self._app_version_id", "def get_version():\n return \"0.0.1 (prerelease prototype)\"", "def get_api_version(self):\n from webapi import VERSION\n return '.'.join(map(str, VERSION))", "def version_label(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"version_label\")", "def get_version() -> str:\n return __version__", "def name(self):\n return self._env_name", "def _get_app_name(app):\n return app[APP_NAME_KEY]", "def version_code(self) -> str:\n return pulumi.get(self, \"version_code\")", "def app_name(self):\n module_filepath = inspect.getfile(type(self))\n parent_dir = os.path.dirname\n app_dirpath = parent_dir(parent_dir(parent_dir(module_filepath)))\n app_name = os.path.basename(app_dirpath)\n return app_name", "def display_name(self) -> str:\n if self.is_verified:\n return f\"Verified Package {self.csharp_version}\"\n elif self.is_main:\n return \"main (unstable)\"\n else:\n return self.release_tag.replace(\"_\", \" \").title()", "def join_app_version(appname,version,platform):\n return \"%s-%s.%s\" % (appname,version,platform,)", "def to_release_brach_name(self) -> str:\n return f\"release/{self.major}.{self.minor}\"", "def get_name(app):\n from uuid import uuid4 as uuid\n return (f'accelpy_{app[\"application\"][\"product_id\"]}'\n f'_{str(uuid()).replace(\"-\", \"\")[:8]}')", "def version(self) -> str:\n data = \"none yet\"\n if self.STARTED:\n data = (\n self.about.get(\"Version\")\n or self.about.get(\"Installed Version\")\n or \"DEMO\"\n )\n data = data.replace(\"_\", \".\")\n return data", "def package_name(self) -> str:\n return pulumi.get(self, \"package_name\")", "def get_app_hostname():\n if not is_running_on_app_engine() or is_running_on_localhost():\n return None\n\n version = modules.get_current_version_name()\n app_id = app_identity.get_application_id()\n\n suffix = 'appspot.com'\n\n if ':' in 
app_id:\n tokens = app_id.split(':')\n api_name = tokens[1]\n if tokens[0] == 'google.com':\n suffix = 'googleplex.com'\n else:\n api_name = app_id\n\n # Check if this is the default version\n default_version = modules.get_default_version()\n if version == default_version:\n return '{0}.{1}'.format(app_id, suffix)\n else:\n return '{0}-dot-{1}.{2}'.format(version, api_name, suffix)", "def application_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_name\")", "def get_version_string():\n\n version_string = get_version()\n if not version_string:\n version_string = \"unknown\"\n\n return \"ImageSplit version \" + version_string", "def compute_name(self):\n version_id_str = DELIMITER.join(\n sorted(v.kf_id for v in self.versions.all())\n )\n return DELIMITER.join([NAME_PREFIX, version_id_str])", "def get_app_name(self):\n return getattr(self, '_app_name', None)", "def get_egg_name():\n global eggname\n if not eggname:\n version = local('git describe --abbrev=4', capture=True)\n if version:\n version = '%s-%s' % (version, datetime.datetime.today().strftime('%Y%m%d'))\n eggname = APP_NAME + '-%s-py%s.egg' % (version.replace('-', '_'), python_version)\n return eggname", "def getXsdVersionName(self):\n vers = self.getVersion()\n if vers is None:\n return None\n\n # Determine the filename\n bname = os.path.basename(self.__pathin)\n dname = bname.split(\".\")[0]\n\n dc = DictConfig()\n prefix = dc.get_prefix(dname)\n if prefix:\n vout = \"%s-v%s.xsd\" % (prefix, vers)\n return vout\n\n return None", "def version(self) -> str:\n return self._version", "def version(self) -> str:\n return self._version", "def version(self) -> str:\n return self._version", "def version(self) -> str:\n return self._version", "def version(self) -> str:\n return self._version", "def makeReleaseFileName(cls, version: str) -> str:\n\n from peek_platform import PeekPlatformConfig\n\n return os.path.join(\n PeekPlatformConfig.config.platformSoftwarePath,\n 'peek-release-%s.tar.gz' % version)", "def version_string():\n git_hash = current_git_hash()\n if git_hash:\n return \"pyhole v%s (%s) - https://github.com/jk0/pyhole\" % (\n __VERSION__, git_hash)\n\n return \"pyhole v%s - https://github.com/jk0/pyhole\" % __VERSION__", "def string(self) -> str:\n version = RE_VERSION.match(str(self._version)).group(2)\n if version.endswith(\".\"):\n version = version[:-1]\n return version", "def get_name(self, name):\n return self.apps[name]['name']", "def env_name(self):\n return f\"{self.project_name}-{self.stage}\"", "def launch_template_version(self) -> Optional[str]:\n return pulumi.get(self, \"launch_template_version\")", "def version_string(self):\n return self.server_version", "def get_launch_name():\n\n if product_type == \"RHEL7\":\n launch_name = \"Errata-{0}_{1}_{2}_{3}_{4}_{5}CDN\".format(errata_id, product_type, variant, arch, test_level, cdn)\n \n elif product_type == \"RHEL8\":\n launch_name = \"Errata-{0}_{1}_{2}_{3}_{4}CDN\".format(errata_id, product_type, arch, test_level, cdn)\n\n return launch_name", "def config_version(self) -> str:\n return pulumi.get(self, \"config_version\")", "def config_version(self) -> str:\n return pulumi.get(self, \"config_version\")", "def config_version(self) -> str:\n return pulumi.get(self, \"config_version\")", "def config_version(self) -> str:\n return pulumi.get(self, \"config_version\")", "def config_version(self) -> str:\n return pulumi.get(self, \"config_version\")", "def getVersionString():\n return str(version_gen.major) + \".\" + 
str(version_gen.minor) + \".\" + str(version_gen.compilation)", "def version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"version\")", "def fhir_version_name(fhir_version):\n major_version = int(fhir_version.split('.')[0])\n\n if major_version < 3:\n return 'dstu2'\n elif (major_version >= 3) and (major_version < 4):\n return 'stu3'\n elif (major_version >= 4) and (major_version < 5):\n return 'r4'\n else:\n raise Exception(\n f'Invalid fhir version supplied: {fhir_version}! No name exists '\n 'for the supplied fhir version.'\n )", "def version(self):\n return self._get(\"version\")", "def _app(self) -> str:\n return self.charm.app.name" ]
[ "0.79632217", "0.7523307", "0.7298782", "0.7256948", "0.7225796", "0.7173701", "0.7093467", "0.70632595", "0.70144266", "0.691959", "0.6919559", "0.6915445", "0.6907794", "0.6907794", "0.68302757", "0.68228155", "0.67623806", "0.6752896", "0.6749308", "0.6721663", "0.67069286", "0.66878307", "0.6670897", "0.6666357", "0.66610336", "0.6635652", "0.6610046", "0.65865403", "0.65807474", "0.6574391", "0.65485287", "0.65485287", "0.65485287", "0.65485287", "0.6530117", "0.6526705", "0.65152895", "0.64809597", "0.6467439", "0.6456324", "0.6447868", "0.6447539", "0.6444029", "0.6443798", "0.6429212", "0.6429212", "0.6429212", "0.6429212", "0.6429212", "0.6413494", "0.63985884", "0.63985884", "0.63985884", "0.63985884", "0.6396656", "0.63869816", "0.63867474", "0.6381054", "0.6378667", "0.6366252", "0.6360576", "0.63595074", "0.6342515", "0.6334751", "0.63095266", "0.6307264", "0.6305537", "0.6304374", "0.629819", "0.6294148", "0.62809765", "0.6272927", "0.626856", "0.626311", "0.6262434", "0.6262268", "0.6260591", "0.6242499", "0.62410647", "0.62410647", "0.62410647", "0.62410647", "0.62410647", "0.6231511", "0.6227666", "0.62265426", "0.6224345", "0.6221779", "0.621174", "0.62088174", "0.619402", "0.6192683", "0.6192683", "0.6192683", "0.6192683", "0.6192683", "0.6191005", "0.61877835", "0.6185258", "0.61775166", "0.6167874" ]
0.0
-1
Get an existing Environment resource's state with the given name, id, and optional extra properties used to qualify the lookup.
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None) -> 'Environment':
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

    __props__ = EnvironmentArgs.__new__(EnvironmentArgs)

    __props__.__dict__["application_name"] = None
    __props__.__dict__["cname_prefix"] = None
    __props__.__dict__["description"] = None
    __props__.__dict__["endpoint_url"] = None
    __props__.__dict__["environment_name"] = None
    __props__.__dict__["operations_role"] = None
    __props__.__dict__["option_settings"] = None
    __props__.__dict__["platform_arn"] = None
    __props__.__dict__["solution_stack_name"] = None
    __props__.__dict__["tags"] = None
    __props__.__dict__["template_name"] = None
    __props__.__dict__["tier"] = None
    __props__.__dict__["version_label"] = None
    return Environment(resource_name, opts=opts, __props__=__props__)
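A minimal usage sketch of the lookup pattern above (the logical resource name and environment id below are made-up placeholders, and the import of the Environment class from its provider module is assumed rather than shown):

import pulumi

# Hypothetical lookup of an already-provisioned environment by its physical id;
# "imported-env" and "e-abcd1234" are placeholder values, not real resources.
existing = Environment.get(
    "imported-env",
    id="e-abcd1234",
    opts=pulumi.ResourceOptions(),
)

# Assuming the generated class exposes matching output properties:
pulumi.export("endpoint_url", existing.endpoint_url)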
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Environment':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = EnvironmentArgs.__new__(EnvironmentArgs)\n\n __props__.__dict__[\"arm_template_display_name\"] = None\n __props__.__dict__[\"created_by_user\"] = None\n __props__.__dict__[\"deployment_properties\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"provisioning_state\"] = None\n __props__.__dict__[\"resource_group_id\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"type\"] = None\n __props__.__dict__[\"unique_identifier\"] = None\n return Environment(resource_name, opts=opts, __props__=__props__)", "def get_state_by_id(state_id):\n my_state = storage.get('State', state_id)\n if my_state is None:\n abort(404)\n return jsonify(my_state.to_dict())", "def get_state_by_id(state_id):\n for key, value in storage.all(\"State\").items():\n if state_id == value.id:\n return jsonify(value.to_dict())\n abort(404)", "def state_by_id(state_id):\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n return jsonify(state.to_dict())", "def a_state(id):\n state = storage.get(State, id)\n if state is not None:\n return jsonify(state.to_dict())\n abort(404)", "def get_state_by_id(state_id):\n state = storage.get(State, state_id)\n if not state:\n abort(404)\n return jsonify(state.to_dict()), 200", "def state_by_id(state_id):\n states_values = storage.all(\"State\").values()\n for obj in states_values:\n if obj.id == state_id:\n return jsonify(obj.to_dict())\n abort(404)", "def get_state(state_id):\n try:\n ''' Check that state_id exists '''\n query = State.select().where(State.id == state_id)\n if not query.exists():\n raise LookupError('state_id')\n\n state = State.get(State.id == state_id)\n return state.to_dict(), 200\n except LookupError as e:\n abort(404)\n except Exception as e:\n abort(500)", "def _get_env(cls, name: str) -> ApiEnvironment:\n envs = {e.name: e for e in cls._envs} # type: ignore\n if name not in envs:\n raise KeyError(f\"Invalid environment '{name}'. 
Choose from {list(envs.keys())}.\")\n return envs[name]", "def get_one_state(state_id):\n state = storage.get('State', state_id)\n if state is None:\n abort(404)\n if request.method == 'DELETE':\n storage.delete(state)\n storage.save()\n return jsonify({}), 200\n elif request.method == 'PUT':\n try:\n res_dict = request.get_json()\n res_dict['id'] = state.id\n res_dict['created_at'] = state.created_at\n state.__init__(**res_dict)\n state.save()\n return jsonify(state.to_dict()), 200\n except:\n abort(400, description='Not a JSON')\n return jsonify(state.to_dict())", "def get_state_by_name(exploration_id, state_name, strict=True):\n exploration = get_exploration_by_id(exploration_id)\n assert state_name\n\n # TODO(sll): This is too slow; improve it.\n state = None\n for candidate_state in exploration.states:\n if candidate_state.name == state_name:\n state = candidate_state\n break\n\n if strict and not state:\n raise Exception('State %s not found' % state_name)\n return state", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n force: Optional[pulumi.Input[bool]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n state: Optional[pulumi.Input[str]] = None) -> 'InstanceState':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceStateState.__new__(_InstanceStateState)\n\n __props__.__dict__[\"force\"] = force\n __props__.__dict__[\"instance_id\"] = instance_id\n __props__.__dict__[\"state\"] = state\n return InstanceState(resource_name, opts=opts, __props__=__props__)", "def get_state(state_id):\n try:\n state = jsonify(storage.get(State, state_id).to_dict())\n return state\n except:\n abort(404)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n auth_mode: Optional[pulumi.Input[str]] = None,\n default_s3_location: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n engine_security_group_id: Optional[pulumi.Input[str]] = None,\n idp_auth_url: Optional[pulumi.Input[str]] = None,\n idp_relay_state_parameter_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n service_role: Optional[pulumi.Input[str]] = None,\n subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n url: Optional[pulumi.Input[str]] = None,\n user_role: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n workspace_security_group_id: Optional[pulumi.Input[str]] = None) -> 'Studio':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _StudioState.__new__(_StudioState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"auth_mode\"] = auth_mode\n __props__.__dict__[\"default_s3_location\"] = default_s3_location\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"engine_security_group_id\"] = engine_security_group_id\n __props__.__dict__[\"idp_auth_url\"] = idp_auth_url\n __props__.__dict__[\"idp_relay_state_parameter_name\"] = idp_relay_state_parameter_name\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"service_role\"] = service_role\n __props__.__dict__[\"subnet_ids\"] = subnet_ids\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n 
__props__.__dict__[\"url\"] = url\n __props__.__dict__[\"user_role\"] = user_role\n __props__.__dict__[\"vpc_id\"] = vpc_id\n __props__.__dict__[\"workspace_security_group_id\"] = workspace_security_group_id\n return Studio(resource_name, opts=opts, __props__=__props__)", "def get_state(state_id):\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n return jsonify(state.to_dict())", "def get_state(state_id):\n state = storage.get(\"State\", state_id)\n if state:\n return jsonify(state.to_dict())\n abort(404)", "def get_state_by_id(state_id):\r\n response = Response(json.dumps(json_error(ResponsesREST.INVALID_INPUT.value)),\r\n status=ResponsesREST.INVALID_INPUT.value, mimetype=\"application/json\")\r\n if validator_id.is_valid({\"id\": state_id}):\r\n state_get = State()\r\n state_get.id_state = state_id\r\n result = state_get.get_state()\r\n if result in (ResponsesREST.NOT_FOUND.value, ResponsesREST.SERVER_ERROR.value):\r\n response = Response(json.dumps(json_error(result)),\r\n status=result, mimetype=\"application/json\")\r\n else:\r\n response = Response(json.dumps(result.json_state()),\r\n status=ResponsesREST.SUCCESSFUL.value,\r\n mimetype=\"application/json\")\r\n return response", "def state_id(state_id):\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n else:\n return jsonify(state.to_dict())", "def a_states_id(state_id):\n i = storage.get(\"State\", state_id)\n if i:\n return jsonify(i.to_dict())\n else:\n return (jsonify({\"error\": \"Not found\"}), 404)", "def statesById(state_id):\n obj = storage.get(State, state_id)\n if obj:\n return jsonify(obj.to_dict())\n return jsonify({\"error\": \"Not found\"}), 404", "def get(self, request, state_id, format=None):\n try:\n state = State.objects.get(id=state_id)\n except ObjectDoesNotExist:\n raise NotFound(detail=\"State not found\")\n\n return Response(StateSerializer(state).data)", "def get_state_by_id(exploration_id, state_id, strict=True):\n # TODO(sll): Generalize this to handle multiple state_ids at a time.\n state_memcache_key = _get_state_memcache_key(exploration_id, state_id)\n memcached_state = memcache_services.get_multi(\n [state_memcache_key]).get(state_memcache_key)\n\n if memcached_state is not None:\n return memcached_state\n else:\n state_model = exp_models.StateModel.get(\n exploration_id, state_id, strict=strict)\n if state_model:\n state = exp_domain.State.from_dict(state_id, state_model.value)\n memcache_services.set_multi({state_memcache_key: state})\n return state\n else:\n return None", "def lookup(job_id: str) -> JobState:\n job = JobState(job_id)\n job.update()\n return job", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n asset_statuses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ZoneAssetStatusArgs']]]]] = None,\n create_time: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n discovery_spec: Optional[pulumi.Input[pulumi.InputType['ZoneDiscoverySpecArgs']]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n lake: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n resource_spec: Optional[pulumi.Input[pulumi.InputType['ZoneResourceSpecArgs']]] = None,\n state: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n uid: 
Optional[pulumi.Input[str]] = None,\n update_time: Optional[pulumi.Input[str]] = None) -> 'Zone':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ZoneState.__new__(_ZoneState)\n\n __props__.__dict__[\"asset_statuses\"] = asset_statuses\n __props__.__dict__[\"create_time\"] = create_time\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"discovery_spec\"] = discovery_spec\n __props__.__dict__[\"display_name\"] = display_name\n __props__.__dict__[\"labels\"] = labels\n __props__.__dict__[\"lake\"] = lake\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"project\"] = project\n __props__.__dict__[\"resource_spec\"] = resource_spec\n __props__.__dict__[\"state\"] = state\n __props__.__dict__[\"type\"] = type\n __props__.__dict__[\"uid\"] = uid\n __props__.__dict__[\"update_time\"] = update_time\n return Zone(resource_name, opts=opts, __props__=__props__)", "def states_id(id=None):\n all_states = storage.all(State)\n foundstate = None\n for key, state in all_states.items():\n if state.id == id:\n foundstate = state\n break\n\n return render_template('9-states.html', States=all_states, ID=id,\n Stateobj=foundstate)", "def view_state_id(state_id):\n states_obj = storage.all(\"State\")\n if request.method == 'GET':\n for state in states_obj.values():\n if state.id == state_id:\n id_found = state.to_dict()\n return jsonify(id_found)\n abort(404)\n\n if request.method == 'DELETE':\n for state in states_obj.values():\n if state.id == state_id:\n storage.delete(state)\n storage.save()\n return make_response(jsonify({}), 200)\n abort(404)\n\n if request.method == 'PUT':\n key = \"State.\" + state_id\n states = storage.all(\"State\")\n instance = states.get(key)\n if instance is None:\n abort(404)\n else:\n if not request.json:\n abort(400, \"Not a JSON\")\n req_var = request.get_json()\n for key, value in req_var.items():\n setattr(instance, key, value)\n storage.save()\n return make_response(jsonify(instance.to_dict()), 200)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n minimal_action: Optional[pulumi.Input[str]] = None,\n most_disruptive_allowed_action: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n preserved_state: Optional[pulumi.Input[pulumi.InputType['RegionPerInstanceConfigPreservedStateArgs']]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n region_instance_group_manager: Optional[pulumi.Input[str]] = None,\n remove_instance_state_on_destroy: Optional[pulumi.Input[bool]] = None) -> 'RegionPerInstanceConfig':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _RegionPerInstanceConfigState.__new__(_RegionPerInstanceConfigState)\n\n __props__.__dict__[\"minimal_action\"] = minimal_action\n __props__.__dict__[\"most_disruptive_allowed_action\"] = most_disruptive_allowed_action\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"preserved_state\"] = preserved_state\n __props__.__dict__[\"project\"] = project\n __props__.__dict__[\"region\"] = region\n __props__.__dict__[\"region_instance_group_manager\"] = region_instance_group_manager\n __props__.__dict__[\"remove_instance_state_on_destroy\"] = remove_instance_state_on_destroy\n return RegionPerInstanceConfig(resource_name, opts=opts, __props__=__props__)", "def state_by_id(id):\n states = storage.all('State').values()\n for state 
in states:\n if state.id == id:\n return render_template('9-states.html', states=state)\n return render_template('9-states.html')", "def given_state(id):\n key = 'State.{}'.format(id)\n state = storage.all(State).get(key)\n return render_template('9-states.html', states=state)", "def states_by_id(id):\n list_states = storage.all('State')\n state_id = 'State.{}'.format(id)\n if state_id in list_states:\n list_states = list_states[state_id]\n else:\n list_states = None\n return render_template('9-states.html', list_states=list_states)", "def get_states(state_id):\n if request.method == 'POST':\n js = request.get_json()\n if js is None:\n return jsonify({'error': 'Not a JSON'}), 400\n if js.get('name', None) is None:\n return jsonify({'error': 'Missing name'}), 400\n obj = State(**js)\n obj.save()\n return jsonify(obj.to_dict()), 200\n\n if state_id:\n state = storage.get('State', state_id)\n if state is None:\n return jsonify({'error': 'Not found'}), 404\n if request.method == 'DELETE':\n storage.delete(state)\n storage.save()\n return jsonify({}), 200\n elif request.method == 'PUT':\n js = request.get_json()\n if js is None:\n return jsonify({'error': 'Not a JSON'}), 400\n js.pop('id', None)\n js.pop('created_at', None)\n js.pop('updated_at', None)\n for k, v in js.items():\n setattr(state, k, v)\n state.save()\n return jsonify(state.to_dict()), 200\n else:\n return jsonify(state.to_dict()), 200\n states = []\n states_obj = storage.all('State')\n for obj in states_obj:\n states.append(states_obj[obj].to_dict())\n return jsonify(states)", "def _get_service_env(self, attrs):\n se_params = {\n 'environment__name': attrs.get('environment'),\n }\n if attrs.get('service_id'):\n se_params['service_id'] = attrs['service_id']\n elif attrs.get('service_uid'):\n se_params['service__ci_uid'] = attrs['service_uid']\n else:\n se_params['service__name'] = attrs['service']\n try:\n se = ServiceEnvironment.objects.get(**se_params)\n except ServiceEnvironment.DoesNotExist:\n params = \", \".join(\n [\"{}={}\".format(k, v) for k, v in se_params.items()]\n )\n raise ServiceEnvironmentDoesNotExistError(\n 'query params: {}'.format(params)\n )\n except ServiceEnvironment.MultipleObjectsReturned:\n params = \", \".join(\n [\"{}={}\".format(k, v) for k, v in se_params.items()]\n )\n raise MultipleServiceEnvironmentsReturned(\n 'query params: {}'.format(params)\n )\n return se", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n add_on: Optional[pulumi.Input[pulumi.InputType['InstanceAddOnArgs']]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n blueprint_id: Optional[pulumi.Input[str]] = None,\n bundle_id: Optional[pulumi.Input[str]] = None,\n cpu_count: Optional[pulumi.Input[int]] = None,\n created_at: Optional[pulumi.Input[str]] = None,\n ip_address_type: Optional[pulumi.Input[str]] = None,\n ipv6_address: Optional[pulumi.Input[str]] = None,\n ipv6_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n is_static_ip: Optional[pulumi.Input[bool]] = None,\n key_pair_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n public_ip_address: Optional[pulumi.Input[str]] = None,\n ram_size: Optional[pulumi.Input[float]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n user_data: 
Optional[pulumi.Input[str]] = None,\n username: Optional[pulumi.Input[str]] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceState.__new__(_InstanceState)\n\n __props__.__dict__[\"add_on\"] = add_on\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"availability_zone\"] = availability_zone\n __props__.__dict__[\"blueprint_id\"] = blueprint_id\n __props__.__dict__[\"bundle_id\"] = bundle_id\n __props__.__dict__[\"cpu_count\"] = cpu_count\n __props__.__dict__[\"created_at\"] = created_at\n __props__.__dict__[\"ip_address_type\"] = ip_address_type\n __props__.__dict__[\"ipv6_address\"] = ipv6_address\n __props__.__dict__[\"ipv6_addresses\"] = ipv6_addresses\n __props__.__dict__[\"is_static_ip\"] = is_static_ip\n __props__.__dict__[\"key_pair_name\"] = key_pair_name\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"private_ip_address\"] = private_ip_address\n __props__.__dict__[\"public_ip_address\"] = public_ip_address\n __props__.__dict__[\"ram_size\"] = ram_size\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"user_data\"] = user_data\n __props__.__dict__[\"username\"] = username\n return Instance(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n activation_key: Optional[pulumi.Input[str]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n ip_address: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n private_link_endpoint: Optional[pulumi.Input[str]] = None,\n security_group_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n subnet_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n vpc_endpoint_id: Optional[pulumi.Input[str]] = None) -> 'Agent':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _AgentState.__new__(_AgentState)\n\n __props__.__dict__[\"activation_key\"] = activation_key\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"ip_address\"] = ip_address\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"private_link_endpoint\"] = private_link_endpoint\n __props__.__dict__[\"security_group_arns\"] = security_group_arns\n __props__.__dict__[\"subnet_arns\"] = subnet_arns\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"vpc_endpoint_id\"] = vpc_endpoint_id\n return Agent(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Machine':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = MachineArgs.__new__(MachineArgs)\n\n __props__.__dict__[\"ad_fqdn\"] = None\n __props__.__dict__[\"agent_configuration\"] = None\n __props__.__dict__[\"agent_upgrade\"] = None\n __props__.__dict__[\"agent_version\"] = None\n __props__.__dict__[\"client_public_key\"] = None\n __props__.__dict__[\"cloud_metadata\"] = None\n __props__.__dict__[\"detected_properties\"] = None\n __props__.__dict__[\"display_name\"] = None\n __props__.__dict__[\"dns_fqdn\"] = None\n __props__.__dict__[\"domain_name\"] = None\n __props__.__dict__[\"error_details\"] = None\n __props__.__dict__[\"extensions\"] = 
None\n __props__.__dict__[\"identity\"] = None\n __props__.__dict__[\"last_status_change\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"location_data\"] = None\n __props__.__dict__[\"machine_fqdn\"] = None\n __props__.__dict__[\"mssql_discovered\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"os_name\"] = None\n __props__.__dict__[\"os_profile\"] = None\n __props__.__dict__[\"os_sku\"] = None\n __props__.__dict__[\"os_type\"] = None\n __props__.__dict__[\"os_version\"] = None\n __props__.__dict__[\"parent_cluster_resource_id\"] = None\n __props__.__dict__[\"private_link_scope_resource_id\"] = None\n __props__.__dict__[\"provisioning_state\"] = None\n __props__.__dict__[\"resources\"] = None\n __props__.__dict__[\"service_statuses\"] = None\n __props__.__dict__[\"status\"] = None\n __props__.__dict__[\"system_data\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"type\"] = None\n __props__.__dict__[\"vm_id\"] = None\n __props__.__dict__[\"vm_uuid\"] = None\n return Machine(resource_name, opts=opts, __props__=__props__)", "def get_state_by_id(states: [State], state_id: str, id_type: str) -> State:\n if id_type == 'new':\n for state in states:\n if state.new_id == state_id:\n return state\n if id_type == 'old':\n for state in states:\n if state.id == state_id:\n return state\n return states[0]", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n application_insights_id: Optional[pulumi.Input[str]] = None,\n container_registry_id: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n discovery_url: Optional[pulumi.Input[str]] = None,\n encryption: Optional[pulumi.Input[pulumi.InputType['WorkspaceEncryptionArgs']]] = None,\n friendly_name: Optional[pulumi.Input[str]] = None,\n high_business_impact: Optional[pulumi.Input[bool]] = None,\n identity: Optional[pulumi.Input[pulumi.InputType['WorkspaceIdentityArgs']]] = None,\n image_build_compute_name: Optional[pulumi.Input[str]] = None,\n key_vault_id: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n primary_user_assigned_identity: Optional[pulumi.Input[str]] = None,\n public_access_behind_virtual_network_enabled: Optional[pulumi.Input[bool]] = None,\n public_network_access_enabled: Optional[pulumi.Input[bool]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n sku_name: Optional[pulumi.Input[str]] = None,\n storage_account_id: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n v1_legacy_mode_enabled: Optional[pulumi.Input[bool]] = None,\n workspace_id: Optional[pulumi.Input[str]] = None) -> 'Workspace':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _WorkspaceState.__new__(_WorkspaceState)\n\n __props__.__dict__[\"application_insights_id\"] = application_insights_id\n __props__.__dict__[\"container_registry_id\"] = container_registry_id\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"discovery_url\"] = discovery_url\n __props__.__dict__[\"encryption\"] = encryption\n __props__.__dict__[\"friendly_name\"] = friendly_name\n __props__.__dict__[\"high_business_impact\"] = high_business_impact\n __props__.__dict__[\"identity\"] = identity\n __props__.__dict__[\"image_build_compute_name\"] = image_build_compute_name\n __props__.__dict__[\"key_vault_id\"] = key_vault_id\n 
__props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"primary_user_assigned_identity\"] = primary_user_assigned_identity\n __props__.__dict__[\"public_access_behind_virtual_network_enabled\"] = public_access_behind_virtual_network_enabled\n __props__.__dict__[\"public_network_access_enabled\"] = public_network_access_enabled\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"sku_name\"] = sku_name\n __props__.__dict__[\"storage_account_id\"] = storage_account_id\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"v1_legacy_mode_enabled\"] = v1_legacy_mode_enabled\n __props__.__dict__[\"workspace_id\"] = workspace_id\n return Workspace(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n attributes: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n description: Optional[pulumi.Input[str]] = None,\n disable_status_check: Optional[pulumi.Input[bool]] = None,\n email: Optional[pulumi.Input[str]] = None,\n masters: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project_id: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n ttl: Optional[pulumi.Input[int]] = None,\n type: Optional[pulumi.Input[str]] = None,\n value_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None) -> 'Zone':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ZoneState.__new__(_ZoneState)\n\n __props__.__dict__[\"attributes\"] = attributes\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"disable_status_check\"] = disable_status_check\n __props__.__dict__[\"email\"] = email\n __props__.__dict__[\"masters\"] = masters\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"project_id\"] = project_id\n __props__.__dict__[\"region\"] = region\n __props__.__dict__[\"ttl\"] = ttl\n __props__.__dict__[\"type\"] = type\n __props__.__dict__[\"value_specs\"] = value_specs\n return Zone(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n auto_scaling_configuration_arn: Optional[pulumi.Input[str]] = None,\n encryption_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceEncryptionConfigurationArgs']]] = None,\n health_check_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceHealthCheckConfigurationArgs']]] = None,\n instance_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceInstanceConfigurationArgs']]] = None,\n network_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceNetworkConfigurationArgs']]] = None,\n observability_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceObservabilityConfigurationArgs']]] = None,\n service_id: Optional[pulumi.Input[str]] = None,\n service_name: Optional[pulumi.Input[str]] = None,\n service_url: Optional[pulumi.Input[str]] = None,\n source_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceSourceConfigurationArgs']]] = None,\n status: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Service':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = 
_ServiceState.__new__(_ServiceState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"auto_scaling_configuration_arn\"] = auto_scaling_configuration_arn\n __props__.__dict__[\"encryption_configuration\"] = encryption_configuration\n __props__.__dict__[\"health_check_configuration\"] = health_check_configuration\n __props__.__dict__[\"instance_configuration\"] = instance_configuration\n __props__.__dict__[\"network_configuration\"] = network_configuration\n __props__.__dict__[\"observability_configuration\"] = observability_configuration\n __props__.__dict__[\"service_id\"] = service_id\n __props__.__dict__[\"service_name\"] = service_name\n __props__.__dict__[\"service_url\"] = service_url\n __props__.__dict__[\"source_configuration\"] = source_configuration\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n return Service(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n comparison: Optional[pulumi.Input[str]] = None,\n created_at: Optional[pulumi.Input[int]] = None,\n critical: Optional[pulumi.Input[pulumi.InputType['InfraAlertConditionCriticalArgs']]] = None,\n description: Optional[pulumi.Input[str]] = None,\n enabled: Optional[pulumi.Input[bool]] = None,\n entity_guid: Optional[pulumi.Input[str]] = None,\n event: Optional[pulumi.Input[str]] = None,\n integration_provider: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n policy_id: Optional[pulumi.Input[int]] = None,\n process_where: Optional[pulumi.Input[str]] = None,\n runbook_url: Optional[pulumi.Input[str]] = None,\n select: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n updated_at: Optional[pulumi.Input[int]] = None,\n violation_close_timer: Optional[pulumi.Input[int]] = None,\n warning: Optional[pulumi.Input[pulumi.InputType['InfraAlertConditionWarningArgs']]] = None,\n where: Optional[pulumi.Input[str]] = None) -> 'InfraAlertCondition':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InfraAlertConditionState.__new__(_InfraAlertConditionState)\n\n __props__.__dict__[\"comparison\"] = comparison\n __props__.__dict__[\"created_at\"] = created_at\n __props__.__dict__[\"critical\"] = critical\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"enabled\"] = enabled\n __props__.__dict__[\"entity_guid\"] = entity_guid\n __props__.__dict__[\"event\"] = event\n __props__.__dict__[\"integration_provider\"] = integration_provider\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"policy_id\"] = policy_id\n __props__.__dict__[\"process_where\"] = process_where\n __props__.__dict__[\"runbook_url\"] = runbook_url\n __props__.__dict__[\"select\"] = select\n __props__.__dict__[\"type\"] = type\n __props__.__dict__[\"updated_at\"] = updated_at\n __props__.__dict__[\"violation_close_timer\"] = violation_close_timer\n __props__.__dict__[\"warning\"] = warning\n __props__.__dict__[\"where\"] = where\n return InfraAlertCondition(resource_name, opts=opts, __props__=__props__)", "def __getitem__(self, identifier: str) -> BaseRegistryEntry:\n self._load_all_manifests()\n if identifier not in self._REGISTERED_ENVS:\n raise KeyError(f\"The entry {identifier} is not present in the registry.\")\n return self._REGISTERED_ENVS[identifier]", "def get_state(self, entity_id: str, attribute: str = \"state\") -> dict:\n if not 
self.connected:\n LOGGER.warning(\"Connection is not yet ready.\")\n state_obj = self._states.get(entity_id)\n if state_obj:\n if attribute == \"state\":\n return state_obj[\"state\"]\n if attribute:\n return state_obj[\"attributes\"].get(attribute)\n return state_obj\n return None", "def update_state(state_id):\n if not request.json:\n abort(400, \"Not a JSON\")\n state = storage.get(\"State\", id=state_id)\n if state:\n state.name = request.json['name']\n state.save()\n return jsonify(state.to_dict()), 200\n abort(404)", "def updateState(state_id):\n # garbage = {\"id\", \"created_at\", \"updated_at\"}\n state = storage.get(\"State\", state_id)\n if state is None:\n abort(404)\n thing = request.get_json(silent=True)\n if thing is None or not request.json:\n return (jsonify({\"error\": \"Not a JSON\"}), 400)\n thing = request.get_json(silent=True)\n for key, value in thing.items():\n if key == 'name':\n setattr(state, key, value)\n state.save()\n return (jsonify(state.to_dict()), 200)", "def get_experiment_state_v1(self, skill_id, experiment_id, **kwargs):\n # type: (str, str, **Any) -> Union[ApiResponse, object, GetExperimentStateResponse_5152b250, StandardizedError_f5106a89, BadRequestError_f854b05]\n operation_name = \"get_experiment_state_v1\"\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'skill_id' is set\n if ('skill_id' not in params) or (params['skill_id'] is None):\n raise ValueError(\n \"Missing the required parameter `skill_id` when calling `\" + operation_name + \"`\")\n # verify the required parameter 'experiment_id' is set\n if ('experiment_id' not in params) or (params['experiment_id'] is None):\n raise ValueError(\n \"Missing the required parameter `experiment_id` when calling `\" + operation_name + \"`\")\n\n resource_path = '/v1/skills/{skillId}/experiments/{experimentId}/state'\n resource_path = resource_path.replace('{format}', 'json')\n\n path_params = {} # type: Dict\n if 'skill_id' in params:\n path_params['skillId'] = params['skill_id']\n if 'experiment_id' in params:\n path_params['experimentId'] = params['experiment_id']\n\n query_params = [] # type: List\n\n header_params = [] # type: List\n\n body_params = None\n header_params.append(('Content-type', 'application/json'))\n header_params.append(('User-Agent', self.user_agent))\n\n # Response Type\n full_response = False\n if 'full_response' in params:\n full_response = params['full_response']\n\n # Authentication setting\n access_token = self._lwa_service_client.get_access_token_from_refresh_token()\n authorization_value = \"Bearer \" + access_token\n header_params.append(('Authorization', authorization_value))\n\n error_definitions = [] # type: List\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.experiment.get_experiment_state_response.GetExperimentStateResponse\", status_code=200, message=\"Returned skill experiment state.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=401, message=\"The auth token is invalid/expired or doesn&#39;t have access to the resource.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.bad_request_error.BadRequestError\", status_code=403, message=\"The operation being requested is not allowed.\"))\n 
error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=404, message=\"The resource being requested is not found.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=429, message=\"Exceeds the permitted request limit. Throttling criteria includes total requests, per API, ClientId, and CustomerId.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=500, message=\"Internal Server Error.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=503, message=\"Service Unavailable.\"))\n\n api_response = self.invoke(\n method=\"GET\",\n endpoint=self._api_endpoint,\n path=resource_path,\n path_params=path_params,\n query_params=query_params,\n header_params=header_params,\n body=body_params,\n response_definitions=error_definitions,\n response_type=\"ask_smapi_model.v1.skill.experiment.get_experiment_state_response.GetExperimentStateResponse\")\n\n if full_response:\n return api_response\n return api_response.body", "def environment(self, name):\n return self.environments[name]", "def get(self, sid: typing.Union[uuid.UUID, int]) -> bytes:\n if not self.tag.training:\n return bytes()\n if isinstance(sid, int):\n sid = self.tag.states[sid]\n if sid not in self.tag.states:\n raise Level.Invalid(f'Unknown state reference for {self}: {sid}')\n LOGGER.debug('%s: Getting state %s', self, sid)\n return STATES(self.registry, self.project.key, self.lineage.key, self.key, sid)", "def state(name, path=None):\n # Don't use _ensure_exists() here, it will mess with _change_state()\n\n cachekey = f\"lxc.state.{name}{path}\"\n try:\n return __context__[cachekey]\n except KeyError:\n if not exists(name, path=path):\n __context__[cachekey] = None\n else:\n cmd = \"lxc-info\"\n if path:\n cmd += f\" -P {shlex.quote(path)}\"\n cmd += f\" -n {name}\"\n ret = __salt__[\"cmd.run_all\"](cmd, python_shell=False)\n if ret[\"retcode\"] != 0:\n _clear_context()\n raise CommandExecutionError(\n f\"Unable to get state of container '{name}'\"\n )\n c_infos = ret[\"stdout\"].splitlines()\n c_state = None\n for c_info in c_infos:\n stat = c_info.split(\":\")\n if stat[0].lower() == \"state\":\n c_state = stat[1].strip().lower()\n break\n __context__[cachekey] = c_state\n return __context__[cachekey]", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'FhirStore':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = FhirStoreArgs.__new__(FhirStoreArgs)\n\n __props__.__dict__[\"complex_data_type_reference_parsing\"] = None\n __props__.__dict__[\"dataset_id\"] = None\n __props__.__dict__[\"default_search_handling_strict\"] = None\n __props__.__dict__[\"disable_referential_integrity\"] = None\n __props__.__dict__[\"disable_resource_versioning\"] = None\n __props__.__dict__[\"enable_update_create\"] = None\n __props__.__dict__[\"fhir_store_id\"] = None\n __props__.__dict__[\"labels\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"notification_config\"] = None\n __props__.__dict__[\"notification_configs\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"stream_configs\"] = None\n 
__props__.__dict__[\"validation_config\"] = None\n __props__.__dict__[\"version\"] = None\n return FhirStore(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = InstanceArgs.__new__(InstanceArgs)\n\n __props__.__dict__[\"build\"] = None\n __props__.__dict__[\"config\"] = None\n __props__.__dict__[\"create_time\"] = None\n __props__.__dict__[\"instance_id\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"state\"] = None\n __props__.__dict__[\"state_message\"] = None\n __props__.__dict__[\"update_time\"] = None\n return Instance(resource_name, opts=opts, __props__=__props__)", "def state_id(id):\n flag = 0\n states = storage.all(State).values()\n for state in states:\n if state.id == id:\n flag = 1\n break\n return render_template('9-states.html', state=state, flag=flag)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Workflow':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = WorkflowArgs.__new__(WorkflowArgs)\n\n __props__.__dict__[\"acr\"] = None\n __props__.__dict__[\"aks_resource_id\"] = None\n __props__.__dict__[\"app_name\"] = None\n __props__.__dict__[\"auth_status\"] = None\n __props__.__dict__[\"branch_name\"] = None\n __props__.__dict__[\"builder_version\"] = None\n __props__.__dict__[\"deployment_properties\"] = None\n __props__.__dict__[\"docker_build_context\"] = None\n __props__.__dict__[\"dockerfile\"] = None\n __props__.__dict__[\"dockerfile_generation_mode\"] = None\n __props__.__dict__[\"dockerfile_output_directory\"] = None\n __props__.__dict__[\"generation_language\"] = None\n __props__.__dict__[\"image_name\"] = None\n __props__.__dict__[\"image_tag\"] = None\n __props__.__dict__[\"language_version\"] = None\n __props__.__dict__[\"last_workflow_run\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"manifest_generation_mode\"] = None\n __props__.__dict__[\"manifest_output_directory\"] = None\n __props__.__dict__[\"manifest_type\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"namespace\"] = None\n __props__.__dict__[\"oidc_credentials\"] = None\n __props__.__dict__[\"port\"] = None\n __props__.__dict__[\"pr_status\"] = None\n __props__.__dict__[\"pr_url\"] = None\n __props__.__dict__[\"pull_number\"] = None\n __props__.__dict__[\"repository_name\"] = None\n __props__.__dict__[\"repository_owner\"] = None\n __props__.__dict__[\"system_data\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"type\"] = None\n return Workflow(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n connection_string: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n instance_charge_type: Optional[pulumi.Input[str]] = None,\n instance_series: Optional[pulumi.Input[str]] = None,\n mysql_version: Optional[pulumi.Input[int]] = None,\n port: Optional[pulumi.Input[str]] = None,\n specification: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None) -> 'Instance':\n opts = 
pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceState.__new__(_InstanceState)\n\n __props__.__dict__[\"connection_string\"] = connection_string\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"instance_charge_type\"] = instance_charge_type\n __props__.__dict__[\"instance_series\"] = instance_series\n __props__.__dict__[\"mysql_version\"] = mysql_version\n __props__.__dict__[\"port\"] = port\n __props__.__dict__[\"specification\"] = specification\n __props__.__dict__[\"vpc_id\"] = vpc_id\n __props__.__dict__[\"vswitch_id\"] = vswitch_id\n __props__.__dict__[\"zone_id\"] = zone_id\n return Instance(resource_name, opts=opts, __props__=__props__)", "def auto_env(env_id, **kwargs):\n if env_id in ENV_BUILDER_REGISTRY:\n return ENV_BUILDER_REGISTRY[env_id](env_id, **kwargs)\n else:\n return base_env(env_id, **kwargs)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Service':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = ServiceArgs.__new__(ServiceArgs)\n\n __props__.__dict__[\"capacity_provider_strategy\"] = None\n __props__.__dict__[\"cluster\"] = None\n __props__.__dict__[\"deployment_configuration\"] = None\n __props__.__dict__[\"deployment_controller\"] = None\n __props__.__dict__[\"desired_count\"] = None\n __props__.__dict__[\"enable_ecs_managed_tags\"] = None\n __props__.__dict__[\"enable_execute_command\"] = None\n __props__.__dict__[\"health_check_grace_period_seconds\"] = None\n __props__.__dict__[\"launch_type\"] = None\n __props__.__dict__[\"load_balancers\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"network_configuration\"] = None\n __props__.__dict__[\"placement_constraints\"] = None\n __props__.__dict__[\"placement_strategies\"] = None\n __props__.__dict__[\"platform_version\"] = None\n __props__.__dict__[\"propagate_tags\"] = None\n __props__.__dict__[\"role\"] = None\n __props__.__dict__[\"scheduling_strategy\"] = None\n __props__.__dict__[\"service_arn\"] = None\n __props__.__dict__[\"service_connect_configuration\"] = None\n __props__.__dict__[\"service_name\"] = None\n __props__.__dict__[\"service_registries\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"task_definition\"] = None\n return Service(resource_name, opts=opts, __props__=__props__)", "def getstate(self,name):\n state = self.states[name]\n debug('kfnode.getstate ',(name,state))\n return state", "def get_project_environment(name=None, uuid=None, project_name=None, project_uuid=None):\n\n client = get_api_client()\n project_data = get_project(project_name, project_uuid)\n project_uuid = project_data[\"metadata\"][\"uuid\"]\n project_name = project_data[\"status\"][\"name\"]\n environments = project_data[\"status\"][\"resources\"][\"environment_reference_list\"]\n project_environments = {row[\"uuid\"]: True for row in environments}\n\n if not name and not uuid:\n return None, project_data\n\n if uuid is None:\n params = {\"filter\": \"name=={};project_reference=={}\".format(name, project_uuid)}\n LOG.info(\n \"Searching for the environment {} under project {}\".format(\n name, project_name\n )\n )\n res, err = client.environment.list(params=params)\n if err:\n raise Exception(\"[{}] - {}\".format(err[\"code\"], err[\"error\"]))\n\n response = res.json()\n entities = response.get(\"entities\")\n if not entities:\n raise Exception(\n \"No environment with name {} found in project {}\".format(\n name, 
project_name\n )\n )\n\n environment = entities[0]\n uuid = environment[\"metadata\"][\"uuid\"]\n\n if not project_environments.get(uuid):\n raise Exception(\n \"No environment with name {} found in project {}\".format(name, project_name)\n )\n\n LOG.info(\"Environment {} found \".format(name))\n\n # for getting additional fields\n return get_environment_by_uuid(uuid), project_data", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n name: Optional[pulumi.Input[str]] = None,\n virtual_hub_id: Optional[pulumi.Input[str]] = None) -> 'VirtualNetworkAppliance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _VirtualNetworkApplianceState.__new__(_VirtualNetworkApplianceState)\n\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"virtual_hub_id\"] = virtual_hub_id\n return VirtualNetworkAppliance(resource_name, opts=opts, __props__=__props__)", "def get_with_inventory(self, context, id_):\n try:\n db_resource_mgr_data = self.db_api.get_resource_manager(\n context, id_)\n db_props_data = self.db_api.get_resource_mgr_properties(context,\n id_, key=eon_const.RESOURCE_MGR_STATE_KEY)\n\n driver_obj = driver.load_resource_mgr_driver(\n db_resource_mgr_data['type'])\n inventory = driver_obj.get_inventory(db_resource_mgr_data)\n resource_mgr_data = _make_response(db_resource_mgr_data,\n property_list=db_props_data,\n inventory=inventory)\n LOG.debug(\"[%s] Resource data %s\"\n % (id_, logging.mask_password(resource_mgr_data)))\n return resource_mgr_data\n\n except exception.NotFound as e:\n LOG.error(e)\n raise e\n except Exception as e:\n msg = \"Error retrieving the 'resource':%s. Reason: %s\" % (\n id_, e.message)\n LOG.exception(msg)\n raise exception.RetrieveException(e.message)", "def read_one(key):\n\n environment = (Environment.query.filter(Environment.key == key).one_or_none())\n\n if environment is not None:\n # Serialize the data for the response\n environment_schema = EnvironmentSchema()\n data = environment_schema.dump(environment)\n return data\n else:\n abort(\n 404, \"Environment with key {key} not found\".format(key=key)\n )", "def standby_setting_get_by_id(context, id, session=None):\n result = model_query(context, models.StandbySetting, session=session).\\\n filter_by(id=id).first()\n if not result:\n raise exception.NotFound(\"setting\")\n\n return result", "def from_esi_name(cls, esi_state_name: str) -> \"Structure.State\":\n STATES_ESI_MAP = {\n \"anchor_vulnerable\": cls.ANCHOR_VULNERABLE,\n \"anchoring\": cls.ANCHORING,\n \"armor_reinforce\": cls.ARMOR_REINFORCE,\n \"armor_vulnerable\": cls.ARMOR_VULNERABLE,\n \"deploy_vulnerable\": cls.DEPLOY_VULNERABLE,\n \"fitting_invulnerable\": cls.FITTING_INVULNERABLE,\n \"hull_reinforce\": cls.HULL_REINFORCE,\n \"hull_vulnerable\": cls.HULL_VULNERABLE,\n \"online_deprecated\": cls.ONLINE_DEPRECATED,\n \"onlining_vulnerable\": cls.ONLINING_VULNERABLE,\n \"shield_vulnerable\": cls.SHIELD_VULNERABLE,\n \"unanchored\": cls.UNANCHORED,\n \"offline\": cls.POS_OFFLINE,\n \"online\": cls.POS_ONLINE,\n \"onlining\": cls.POS_ONLINING,\n \"reinforced\": cls.POS_REINFORCED,\n \"unanchoring \": cls.POS_UNANCHORING,\n }\n return (\n STATES_ESI_MAP[esi_state_name]\n if esi_state_name in STATES_ESI_MAP\n else cls.UNKNOWN\n )", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n git_remote_settings: Optional[pulumi.Input[pulumi.InputType['RepositoryGitRemoteSettingsArgs']]] = None,\n name: 
Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n workspace_compilation_overrides: Optional[pulumi.Input[pulumi.InputType['RepositoryWorkspaceCompilationOverridesArgs']]] = None) -> 'Repository':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _RepositoryState.__new__(_RepositoryState)\n\n __props__.__dict__[\"git_remote_settings\"] = git_remote_settings\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"project\"] = project\n __props__.__dict__[\"region\"] = region\n __props__.__dict__[\"workspace_compilation_overrides\"] = workspace_compilation_overrides\n return Repository(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n additional_locations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceAdditionalLocationArgs']]]]] = None,\n certificates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceCertificateArgs']]]]] = None,\n client_certificate_enabled: Optional[pulumi.Input[bool]] = None,\n delegation: Optional[pulumi.Input[pulumi.InputType['ServiceDelegationArgs']]] = None,\n developer_portal_url: Optional[pulumi.Input[str]] = None,\n gateway_disabled: Optional[pulumi.Input[bool]] = None,\n gateway_regional_url: Optional[pulumi.Input[str]] = None,\n gateway_url: Optional[pulumi.Input[str]] = None,\n hostname_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceHostnameConfigurationArgs']]] = None,\n identity: Optional[pulumi.Input[pulumi.InputType['ServiceIdentityArgs']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n management_api_url: Optional[pulumi.Input[str]] = None,\n min_api_version: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n notification_sender_email: Optional[pulumi.Input[str]] = None,\n policy: Optional[pulumi.Input[pulumi.InputType['ServicePolicyArgs']]] = None,\n portal_url: Optional[pulumi.Input[str]] = None,\n private_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n protocols: Optional[pulumi.Input[pulumi.InputType['ServiceProtocolsArgs']]] = None,\n public_ip_address_id: Optional[pulumi.Input[str]] = None,\n public_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n public_network_access_enabled: Optional[pulumi.Input[bool]] = None,\n publisher_email: Optional[pulumi.Input[str]] = None,\n publisher_name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n scm_url: Optional[pulumi.Input[str]] = None,\n security: Optional[pulumi.Input[pulumi.InputType['ServiceSecurityArgs']]] = None,\n sign_in: Optional[pulumi.Input[pulumi.InputType['ServiceSignInArgs']]] = None,\n sign_up: Optional[pulumi.Input[pulumi.InputType['ServiceSignUpArgs']]] = None,\n sku_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tenant_access: Optional[pulumi.Input[pulumi.InputType['ServiceTenantAccessArgs']]] = None,\n virtual_network_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceVirtualNetworkConfigurationArgs']]] = None,\n virtual_network_type: Optional[pulumi.Input[str]] = None,\n zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'Service':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ServiceState.__new__(_ServiceState)\n\n 
__props__.__dict__[\"additional_locations\"] = additional_locations\n __props__.__dict__[\"certificates\"] = certificates\n __props__.__dict__[\"client_certificate_enabled\"] = client_certificate_enabled\n __props__.__dict__[\"delegation\"] = delegation\n __props__.__dict__[\"developer_portal_url\"] = developer_portal_url\n __props__.__dict__[\"gateway_disabled\"] = gateway_disabled\n __props__.__dict__[\"gateway_regional_url\"] = gateway_regional_url\n __props__.__dict__[\"gateway_url\"] = gateway_url\n __props__.__dict__[\"hostname_configuration\"] = hostname_configuration\n __props__.__dict__[\"identity\"] = identity\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"management_api_url\"] = management_api_url\n __props__.__dict__[\"min_api_version\"] = min_api_version\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"notification_sender_email\"] = notification_sender_email\n __props__.__dict__[\"policy\"] = policy\n __props__.__dict__[\"portal_url\"] = portal_url\n __props__.__dict__[\"private_ip_addresses\"] = private_ip_addresses\n __props__.__dict__[\"protocols\"] = protocols\n __props__.__dict__[\"public_ip_address_id\"] = public_ip_address_id\n __props__.__dict__[\"public_ip_addresses\"] = public_ip_addresses\n __props__.__dict__[\"public_network_access_enabled\"] = public_network_access_enabled\n __props__.__dict__[\"publisher_email\"] = publisher_email\n __props__.__dict__[\"publisher_name\"] = publisher_name\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"scm_url\"] = scm_url\n __props__.__dict__[\"security\"] = security\n __props__.__dict__[\"sign_in\"] = sign_in\n __props__.__dict__[\"sign_up\"] = sign_up\n __props__.__dict__[\"sku_name\"] = sku_name\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tenant_access\"] = tenant_access\n __props__.__dict__[\"virtual_network_configuration\"] = virtual_network_configuration\n __props__.__dict__[\"virtual_network_type\"] = virtual_network_type\n __props__.__dict__[\"zones\"] = zones\n return Service(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Canary':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = CanaryArgs.__new__(CanaryArgs)\n\n __props__.__dict__[\"artifact_config\"] = None\n __props__.__dict__[\"artifact_s3_location\"] = None\n __props__.__dict__[\"code\"] = None\n __props__.__dict__[\"delete_lambda_resources_on_canary_deletion\"] = None\n __props__.__dict__[\"execution_role_arn\"] = None\n __props__.__dict__[\"failure_retention_period\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"run_config\"] = None\n __props__.__dict__[\"runtime_version\"] = None\n __props__.__dict__[\"schedule\"] = None\n __props__.__dict__[\"start_canary_after_creation\"] = None\n __props__.__dict__[\"state\"] = None\n __props__.__dict__[\"success_retention_period\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"visual_reference\"] = None\n __props__.__dict__[\"vpc_config\"] = None\n return Canary(resource_name, opts=opts, __props__=__props__)", "def _get_latest_version(self, name: str) -> Environment:\n result = _get_latest(\n name,\n self._version_operations,\n self._resource_group_name,\n self._workspace_name,\n self._registry_name,\n )\n return Environment._from_rest_object(result)", "def get_id(\n name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n 
profile=None,\n in_states=None,\n filters=None,\n):\n instance_ids = find_instances(\n name=name,\n tags=tags,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n in_states=in_states,\n filters=filters,\n )\n if instance_ids:\n log.info(\"Instance ids: %s\", \" \".join(instance_ids))\n if len(instance_ids) == 1:\n return instance_ids[0]\n else:\n raise CommandExecutionError(\n \"Found more than one instance matching the criteria.\"\n )\n else:\n log.warning(\"Could not find instance.\")\n return None", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = InstanceArgs.__new__(InstanceArgs)\n\n __props__.__dict__[\"additional_info\"] = None\n __props__.__dict__[\"affinity\"] = None\n __props__.__dict__[\"availability_zone\"] = None\n __props__.__dict__[\"block_device_mappings\"] = None\n __props__.__dict__[\"cpu_options\"] = None\n __props__.__dict__[\"credit_specification\"] = None\n __props__.__dict__[\"disable_api_termination\"] = None\n __props__.__dict__[\"ebs_optimized\"] = None\n __props__.__dict__[\"elastic_gpu_specifications\"] = None\n __props__.__dict__[\"elastic_inference_accelerators\"] = None\n __props__.__dict__[\"enclave_options\"] = None\n __props__.__dict__[\"hibernation_options\"] = None\n __props__.__dict__[\"host_id\"] = None\n __props__.__dict__[\"host_resource_group_arn\"] = None\n __props__.__dict__[\"iam_instance_profile\"] = None\n __props__.__dict__[\"image_id\"] = None\n __props__.__dict__[\"instance_initiated_shutdown_behavior\"] = None\n __props__.__dict__[\"instance_type\"] = None\n __props__.__dict__[\"ipv6_address_count\"] = None\n __props__.__dict__[\"ipv6_addresses\"] = None\n __props__.__dict__[\"kernel_id\"] = None\n __props__.__dict__[\"key_name\"] = None\n __props__.__dict__[\"launch_template\"] = None\n __props__.__dict__[\"license_specifications\"] = None\n __props__.__dict__[\"monitoring\"] = None\n __props__.__dict__[\"network_interfaces\"] = None\n __props__.__dict__[\"placement_group_name\"] = None\n __props__.__dict__[\"private_dns_name\"] = None\n __props__.__dict__[\"private_dns_name_options\"] = None\n __props__.__dict__[\"private_ip\"] = None\n __props__.__dict__[\"private_ip_address\"] = None\n __props__.__dict__[\"propagate_tags_to_volume_on_creation\"] = None\n __props__.__dict__[\"public_dns_name\"] = None\n __props__.__dict__[\"public_ip\"] = None\n __props__.__dict__[\"ramdisk_id\"] = None\n __props__.__dict__[\"security_group_ids\"] = None\n __props__.__dict__[\"security_groups\"] = None\n __props__.__dict__[\"source_dest_check\"] = None\n __props__.__dict__[\"ssm_associations\"] = None\n __props__.__dict__[\"subnet_id\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"tenancy\"] = None\n __props__.__dict__[\"user_data\"] = None\n __props__.__dict__[\"volumes\"] = None\n return Instance(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n connection_string: Optional[pulumi.Input[str]] = None,\n create_sample_data: Optional[pulumi.Input[bool]] = None,\n db_instance_category: Optional[pulumi.Input[str]] = None,\n db_instance_class: Optional[pulumi.Input[str]] = None,\n db_instance_mode: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n encryption_key: 
Optional[pulumi.Input[str]] = None,\n encryption_type: Optional[pulumi.Input[str]] = None,\n engine: Optional[pulumi.Input[str]] = None,\n engine_version: Optional[pulumi.Input[str]] = None,\n instance_charge_type: Optional[pulumi.Input[str]] = None,\n instance_group_count: Optional[pulumi.Input[int]] = None,\n instance_network_type: Optional[pulumi.Input[str]] = None,\n instance_spec: Optional[pulumi.Input[str]] = None,\n ip_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InstanceIpWhitelistArgs']]]]] = None,\n maintain_end_time: Optional[pulumi.Input[str]] = None,\n maintain_start_time: Optional[pulumi.Input[str]] = None,\n master_node_num: Optional[pulumi.Input[int]] = None,\n payment_type: Optional[pulumi.Input[str]] = None,\n period: Optional[pulumi.Input[str]] = None,\n port: Optional[pulumi.Input[str]] = None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n resource_group_id: Optional[pulumi.Input[str]] = None,\n security_ip_lists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n seg_node_num: Optional[pulumi.Input[int]] = None,\n seg_storage_type: Optional[pulumi.Input[str]] = None,\n ssl_enabled: Optional[pulumi.Input[int]] = None,\n status: Optional[pulumi.Input[str]] = None,\n storage_size: Optional[pulumi.Input[int]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n used_time: Optional[pulumi.Input[str]] = None,\n vector_configuration_status: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceState.__new__(_InstanceState)\n\n __props__.__dict__[\"availability_zone\"] = availability_zone\n __props__.__dict__[\"connection_string\"] = connection_string\n __props__.__dict__[\"create_sample_data\"] = create_sample_data\n __props__.__dict__[\"db_instance_category\"] = db_instance_category\n __props__.__dict__[\"db_instance_class\"] = db_instance_class\n __props__.__dict__[\"db_instance_mode\"] = db_instance_mode\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"encryption_key\"] = encryption_key\n __props__.__dict__[\"encryption_type\"] = encryption_type\n __props__.__dict__[\"engine\"] = engine\n __props__.__dict__[\"engine_version\"] = engine_version\n __props__.__dict__[\"instance_charge_type\"] = instance_charge_type\n __props__.__dict__[\"instance_group_count\"] = instance_group_count\n __props__.__dict__[\"instance_network_type\"] = instance_network_type\n __props__.__dict__[\"instance_spec\"] = instance_spec\n __props__.__dict__[\"ip_whitelists\"] = ip_whitelists\n __props__.__dict__[\"maintain_end_time\"] = maintain_end_time\n __props__.__dict__[\"maintain_start_time\"] = maintain_start_time\n __props__.__dict__[\"master_node_num\"] = master_node_num\n __props__.__dict__[\"payment_type\"] = payment_type\n __props__.__dict__[\"period\"] = period\n __props__.__dict__[\"port\"] = port\n __props__.__dict__[\"private_ip_address\"] = private_ip_address\n __props__.__dict__[\"resource_group_id\"] = resource_group_id\n __props__.__dict__[\"security_ip_lists\"] = security_ip_lists\n __props__.__dict__[\"seg_node_num\"] = seg_node_num\n __props__.__dict__[\"seg_storage_type\"] = seg_storage_type\n __props__.__dict__[\"ssl_enabled\"] = ssl_enabled\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"storage_size\"] = storage_size\n 
__props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"used_time\"] = used_time\n __props__.__dict__[\"vector_configuration_status\"] = vector_configuration_status\n __props__.__dict__[\"vpc_id\"] = vpc_id\n __props__.__dict__[\"vswitch_id\"] = vswitch_id\n __props__.__dict__[\"zone_id\"] = zone_id\n return Instance(resource_name, opts=opts, __props__=__props__)", "def get_with_inventory(self, context, id_):\n try:\n db_resource_data = self.db_api.get_resource(context, id_)\n res_properties = self.db_api.get_properties(context, id_)\n\n # for non resource managers return get\n if (db_resource_data['type'] !=\n eon_const.EON_RESOURCE_TYPE_ESX_CLUSTER):\n return _make_response(db_resource_data)\n\n res_mgr_obj = (\n self.db_api.get_resource_managers_by_resource_id(context,\n id_))\n driver_obj = driver.load_resource_driver(db_resource_data['type'])\n _inventory = driver_obj.get_res_inventory(res_mgr_obj,\n res_properties)\n _resource_data = _make_response(db_resource_data,\n inventory=_inventory)\n # (NOTE) Here setting the details of resource manager for the\n # resource\n _res_mgr_data = _make_response(res_mgr_obj, meta_data=False)\n _resource_data[eon_const.RSRC_MGR_INFO] = _res_mgr_data\n\n except exception.NotFound as e:\n LOG.exception(e)\n raise e\n except Exception as e:\n msg = _(\"Error retrieving the 'eon_resource':%s. Reason: %s\") % (\n id_, e)\n log_msg = (\"Error retrieving the 'eon_resource':%s.\"\n \" Reason: %s\") % (id_, e)\n LOG.exception(log_msg)\n raise exception.RetrieveException(msg)\n\n LOG.info(\"The Resource data %s \"\n % logging.mask_password(_resource_data))\n return _resource_data", "def get_environment(environment_name, project_name):\n\n client = get_api_client()\n payload = {\n \"length\": 250,\n \"offset\": 0,\n \"filter\": \"name=={}\".format(environment_name),\n }\n\n if project_name:\n project = get_project(project_name)\n project_id = project[\"metadata\"][\"uuid\"]\n payload[\"filter\"] += \";project_reference=={}\".format(project_id)\n\n res, err = client.environment.list(payload)\n if err:\n raise Exception(\"[{}] - {}\".format(err[\"code\"], err[\"error\"]))\n\n res = res.json()\n if res[\"metadata\"][\"total_matches\"] == 0:\n LOG.error(\"Environment '{}' not found\".format(environment_name))\n sys.exit(-1)\n\n return res[\"entities\"][0]", "def update_state(state_id):\n request_dict = request.get_json()\n if request_dict is None:\n abort(400, 'Not a JSON')\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n for key, value in request_dict.items():\n if key != 'id' and key != 'created_at' and key != 'updated_at':\n setattr(state, key, value)\n state.save()\n return jsonify(state.to_dict())", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n accept_language: Optional[pulumi.Input[str]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n created_time: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n distributor: Optional[pulumi.Input[str]] = None,\n has_default_path: Optional[pulumi.Input[bool]] = None,\n name: Optional[pulumi.Input[str]] = None,\n owner: Optional[pulumi.Input[str]] = None,\n provisioning_artifact_parameters: Optional[pulumi.Input[pulumi.InputType['ProductProvisioningArtifactParametersArgs']]] = None,\n status: Optional[pulumi.Input[str]] = None,\n support_description: Optional[pulumi.Input[str]] = None,\n support_email: Optional[pulumi.Input[str]] = None,\n support_url: Optional[pulumi.Input[str]] = None,\n tags: 
Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n type: Optional[pulumi.Input[str]] = None) -> 'Product':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ProductState.__new__(_ProductState)\n\n __props__.__dict__[\"accept_language\"] = accept_language\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"created_time\"] = created_time\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"distributor\"] = distributor\n __props__.__dict__[\"has_default_path\"] = has_default_path\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"owner\"] = owner\n __props__.__dict__[\"provisioning_artifact_parameters\"] = provisioning_artifact_parameters\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"support_description\"] = support_description\n __props__.__dict__[\"support_email\"] = support_email\n __props__.__dict__[\"support_url\"] = support_url\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"type\"] = type\n return Product(resource_name, opts=opts, __props__=__props__)", "def state(self):\n state = self._resource.get('state', self.default_state)\n\n if state in State:\n return state\n else:\n return getattr(State, state)", "def get(self, name: str, version: Optional[str] = None, label: Optional[str] = None) -> Environment:\n if version and label:\n msg = \"Cannot specify both version and label.\"\n raise ValidationException(\n message=msg,\n target=ErrorTarget.ENVIRONMENT,\n no_personal_data_message=msg,\n error_category=ErrorCategory.USER_ERROR,\n error_type=ValidationErrorType.INVALID_VALUE,\n )\n\n if label:\n return _resolve_label_to_asset(self, name, label)\n\n if not version:\n msg = \"Must provide either version or label.\"\n raise ValidationException(\n message=msg,\n target=ErrorTarget.ENVIRONMENT,\n no_personal_data_message=msg,\n error_category=ErrorCategory.USER_ERROR,\n error_type=ValidationErrorType.MISSING_FIELD,\n )\n name = _preprocess_environment_name(name)\n env_version_resource = self._get(name, version)\n\n return Environment._from_rest_object(env_version_resource)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Reservation':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = ReservationArgs.__new__(ReservationArgs)\n\n __props__.__dict__[\"concurrency\"] = None\n __props__.__dict__[\"creation_time\"] = None\n __props__.__dict__[\"ignore_idle_slots\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"multi_region_auxiliary\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"reservation_id\"] = None\n __props__.__dict__[\"slot_capacity\"] = None\n __props__.__dict__[\"update_time\"] = None\n return Reservation(resource_name, opts=opts, __props__=__props__)", "def get(resource_name, id, opts=None, arn=None, artifact_store=None, name=None, role_arn=None, stages=None, tags=None):\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n __props__[\"arn\"] = arn\n __props__[\"artifact_store\"] = artifact_store\n __props__[\"name\"] = name\n __props__[\"role_arn\"] = role_arn\n __props__[\"stages\"] = stages\n __props__[\"tags\"] = tags\n return Pipeline(resource_name, opts=opts, __props__=__props__)", "def get(\n id: int 
= typer.Argument(1),\n ip: str = typer.Option(..., \"--ip\", \"-i\", envvar=\"HUE_BRIDGE_IP\"),\n user: str = typer.Option(..., \"--user\", \"-u\", envvar=\"HUE_BRIDGE_USER\"),\n):\n light = Light(id, ip=ip, user=user)\n resp = asyncio.run(light.get_state())\n console.print(f\"[{ip}] Light {id} State:\\n{json.dumps(resp, indent=2)}\")", "def get_state(hass, utc_point_in_time, entity_id, run=None):\n states = get_states(hass, utc_point_in_time, (entity_id,), run)\n return states[0] if states else None", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n config: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n metadata: Optional[pulumi.Input[pulumi.InputType['SyntheticsPrivateLocationMetadataArgs']]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'SyntheticsPrivateLocation':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _SyntheticsPrivateLocationState.__new__(_SyntheticsPrivateLocationState)\n\n __props__.__dict__[\"config\"] = config\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"metadata\"] = metadata\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"tags\"] = tags\n return SyntheticsPrivateLocation(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Service':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = ServiceArgs.__new__(ServiceArgs)\n\n __props__.__dict__[\"correlation_scheme\"] = None\n __props__.__dict__[\"default_move_cost\"] = None\n __props__.__dict__[\"etag\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"partition_description\"] = None\n __props__.__dict__[\"placement_constraints\"] = None\n __props__.__dict__[\"provisioning_state\"] = None\n __props__.__dict__[\"service_dns_name\"] = None\n __props__.__dict__[\"service_kind\"] = None\n __props__.__dict__[\"service_load_metrics\"] = None\n __props__.__dict__[\"service_package_activation_mode\"] = None\n __props__.__dict__[\"service_placement_policies\"] = None\n __props__.__dict__[\"service_type_name\"] = None\n __props__.__dict__[\"system_data\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"type\"] = None\n return Service(resource_name, opts=opts, __props__=__props__)", "def all_states(state_id=None):\n if request.method == 'GET':\n _states = storage.all(State).values()\n if state_id is None:\n list_states = list()\n for state in _states:\n list_states.append(state.to_dict())\n return jsonify(list_states)\n else:\n list_states = list()\n for state in _states:\n if state.id == state_id:\n list_states.append(state.to_dict())\n return jsonify(list_states[0])\n abort(404)\n if request.method == 'DELETE':\n obj_state = storage.get(State, state_id)\n if obj_state:\n storage.delete(obj_state)\n storage.save()\n return jsonify(dict()), 200\n abort(404)\n if request.method == 'POST':\n try:\n conv_body = request.get_json()\n if 'name' not in conv_body:\n return \"Missing name\\n\", 400\n new_inst = State(name=conv_body.get('name'))\n storage.new(new_inst)\n storage.save()\n return jsonify(new_inst.to_dict()), 201\n except:\n abort(400, description=\"Not a JSON\")\n if request.method == 'PUT':\n new_inst = storage.get(State, state_id)\n if not new_inst:\n 
abort(404)\n try:\n list_ignore = ['id', 'created_at', 'updated_at']\n conv_body = request.get_json()\n for key, value in conv_body.items():\n if key not in list_ignore:\n setattr(new_inst, key, value)\n new_inst.save()\n return jsonify(new_inst.to_dict()), 200\n except:\n abort(400, description=\"Not a JSON\")", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n api_management_id: Optional[pulumi.Input[str]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None) -> 'Tag':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _TagState.__new__(_TagState)\n\n __props__.__dict__[\"api_management_id\"] = api_management_id\n __props__.__dict__[\"display_name\"] = display_name\n __props__.__dict__[\"name\"] = name\n return Tag(resource_name, opts=opts, __props__=__props__)", "def get_state_machine(self, name):\n response = self.client.list_state_machines()\n print(response)\n if not response.get('stateMachines'):\n return None\n for sm in response.get('stateMachines'):\n if sm['name'] == name:\n return sm['stateMachineArn']", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Assessment':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n\n __props__[\"additional_data\"] = None\n __props__[\"display_name\"] = None\n __props__[\"links\"] = None\n __props__[\"metadata\"] = None\n __props__[\"name\"] = None\n __props__[\"partners_data\"] = None\n __props__[\"resource_details\"] = None\n __props__[\"status\"] = None\n __props__[\"type\"] = None\n return Assessment(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Organization':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = OrganizationArgs.__new__(OrganizationArgs)\n\n __props__.__dict__[\"arn\"] = None\n __props__.__dict__[\"feature_set\"] = None\n __props__.__dict__[\"management_account_arn\"] = None\n __props__.__dict__[\"management_account_email\"] = None\n __props__.__dict__[\"management_account_id\"] = None\n __props__.__dict__[\"root_id\"] = None\n return Organization(resource_name, opts=opts, __props__=__props__)", "def get_state(self, cell_id: int) -> State:\n return self.states[cell_id]", "def getState():\n engine = create_engine(\n 'mysql+mysqldb://{}:{}@localhost:3306/{}'.format(\n sys.argv[1],\n sys.argv[2],\n sys.argv[3]),\n pool_pre_ping=True)\n Base.metadata.create_all(engine)\n\n Session = sessionmaker(bind=engine)\n session = Session()\n\n new_states = State(name='Louisiana')\n session.add(new_states)\n\n for state in session.query(State).order_by(State.id).all():\n if state.name == \"Louisiana\":\n print(\"{}\".format(state.id))\n\n session.commit()\n session.close()", "def early_exit_desired_state(*args, **kwargs) -> dict[str, Any]:\n\n def add_account_identity(acc):\n acc[IDENTIFIER_FIELD_NAME] = acc[\"path\"]\n return acc\n\n def add_role_identity(role):\n role[IDENTIFIER_FIELD_NAME] = role[\"name\"]\n return role\n\n return {\n \"accounts\": [\n add_account_identity(a)\n for a in queries.get_aws_accounts(terraform_state=True)\n ],\n \"roles\": [add_role_identity(r) for r in get_tf_roles()],\n }", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Instance':\n opts = 
pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = InstanceArgs.__new__(InstanceArgs)\n\n __props__.__dict__[\"authorized_network\"] = None\n __props__.__dict__[\"create_time\"] = None\n __props__.__dict__[\"discovery_endpoint\"] = None\n __props__.__dict__[\"display_name\"] = None\n __props__.__dict__[\"instance_id\"] = None\n __props__.__dict__[\"instance_messages\"] = None\n __props__.__dict__[\"labels\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"maintenance_policy\"] = None\n __props__.__dict__[\"maintenance_schedule\"] = None\n __props__.__dict__[\"memcache_full_version\"] = None\n __props__.__dict__[\"memcache_nodes\"] = None\n __props__.__dict__[\"memcache_version\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"node_config\"] = None\n __props__.__dict__[\"node_count\"] = None\n __props__.__dict__[\"parameters\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"state\"] = None\n __props__.__dict__[\"update_available\"] = None\n __props__.__dict__[\"update_time\"] = None\n __props__.__dict__[\"zones\"] = None\n return Instance(resource_name, opts=opts, __props__=__props__)", "def get_scene(self, name=None, id=None):\n\n if(name):\n return self.scenes[name] if name in self.scenes else None\n if(id):\n return next((v for (k,v) in self.scenes.items() if v.id == id), None)\n return None", "def from_esi_name(cls, esi_state_name: str) -> \"StructureService.State\":\n STATES_ESI_MAP = {\"offline\": cls.OFFLINE, \"online\": cls.ONLINE}\n return (\n STATES_ESI_MAP[esi_state_name]\n if esi_state_name in STATES_ESI_MAP\n else cls.OFFLINE\n )", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = InstanceArgs.__new__(InstanceArgs)\n\n __props__.__dict__[\"create_time\"] = None\n __props__.__dict__[\"description\"] = None\n __props__.__dict__[\"etag\"] = None\n __props__.__dict__[\"file_shares\"] = None\n __props__.__dict__[\"instance_id\"] = None\n __props__.__dict__[\"kms_key_name\"] = None\n __props__.__dict__[\"labels\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"networks\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"satisfies_pzs\"] = None\n __props__.__dict__[\"state\"] = None\n __props__.__dict__[\"status_message\"] = None\n __props__.__dict__[\"suspension_reasons\"] = None\n __props__.__dict__[\"tier\"] = None\n return Instance(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'ResolverConfig':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = ResolverConfigArgs.__new__(ResolverConfigArgs)\n\n __props__.__dict__[\"autodefined_reverse\"] = None\n __props__.__dict__[\"autodefined_reverse_flag\"] = None\n __props__.__dict__[\"owner_id\"] = None\n __props__.__dict__[\"resource_id\"] = None\n return ResolverConfig(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n access_string: Optional[pulumi.Input[str]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n authentication_mode: Optional[pulumi.Input[pulumi.InputType['UserAuthenticationModeArgs']]] = None,\n engine: Optional[pulumi.Input[str]] = 
None,\n no_password_required: Optional[pulumi.Input[bool]] = None,\n passwords: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n user_id: Optional[pulumi.Input[str]] = None,\n user_name: Optional[pulumi.Input[str]] = None) -> 'User':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _UserState.__new__(_UserState)\n\n __props__.__dict__[\"access_string\"] = access_string\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"authentication_mode\"] = authentication_mode\n __props__.__dict__[\"engine\"] = engine\n __props__.__dict__[\"no_password_required\"] = no_password_required\n __props__.__dict__[\"passwords\"] = passwords\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"user_id\"] = user_id\n __props__.__dict__[\"user_name\"] = user_name\n return User(resource_name, opts=opts, __props__=__props__)", "def states_id_list(id):\n # Bring dictionary from storage.\n models = storage.all(State)\n template = '9-states.html'\n\n if \"State.{}\".format(id) in models:\n state = models.pop(\"State.{}\".format(id))\n else:\n return (\"<h1>Not found!</h1>\")\n\n # Return with the jinja template.\n return render_template(template, state=state)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n acl_name: Optional[pulumi.Input[str]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n auto_minor_version_upgrade: Optional[pulumi.Input[bool]] = None,\n cluster_endpoints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClusterClusterEndpointArgs']]]]] = None,\n data_tiering: Optional[pulumi.Input[bool]] = None,\n description: Optional[pulumi.Input[str]] = None,\n engine_patch_version: Optional[pulumi.Input[str]] = None,\n engine_version: Optional[pulumi.Input[str]] = None,\n final_snapshot_name: Optional[pulumi.Input[str]] = None,\n kms_key_arn: Optional[pulumi.Input[str]] = None,\n maintenance_window: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n name_prefix: Optional[pulumi.Input[str]] = None,\n node_type: Optional[pulumi.Input[str]] = None,\n num_replicas_per_shard: Optional[pulumi.Input[int]] = None,\n num_shards: Optional[pulumi.Input[int]] = None,\n parameter_group_name: Optional[pulumi.Input[str]] = None,\n port: Optional[pulumi.Input[int]] = None,\n security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n shards: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClusterShardArgs']]]]] = None,\n snapshot_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n snapshot_name: Optional[pulumi.Input[str]] = None,\n snapshot_retention_limit: Optional[pulumi.Input[int]] = None,\n snapshot_window: Optional[pulumi.Input[str]] = None,\n sns_topic_arn: Optional[pulumi.Input[str]] = None,\n subnet_group_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tls_enabled: Optional[pulumi.Input[bool]] = None) -> 'Cluster':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ClusterState.__new__(_ClusterState)\n\n __props__.__dict__[\"acl_name\"] = acl_name\n __props__.__dict__[\"arn\"] = arn\n 
__props__.__dict__[\"auto_minor_version_upgrade\"] = auto_minor_version_upgrade\n __props__.__dict__[\"cluster_endpoints\"] = cluster_endpoints\n __props__.__dict__[\"data_tiering\"] = data_tiering\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"engine_patch_version\"] = engine_patch_version\n __props__.__dict__[\"engine_version\"] = engine_version\n __props__.__dict__[\"final_snapshot_name\"] = final_snapshot_name\n __props__.__dict__[\"kms_key_arn\"] = kms_key_arn\n __props__.__dict__[\"maintenance_window\"] = maintenance_window\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"name_prefix\"] = name_prefix\n __props__.__dict__[\"node_type\"] = node_type\n __props__.__dict__[\"num_replicas_per_shard\"] = num_replicas_per_shard\n __props__.__dict__[\"num_shards\"] = num_shards\n __props__.__dict__[\"parameter_group_name\"] = parameter_group_name\n __props__.__dict__[\"port\"] = port\n __props__.__dict__[\"security_group_ids\"] = security_group_ids\n __props__.__dict__[\"shards\"] = shards\n __props__.__dict__[\"snapshot_arns\"] = snapshot_arns\n __props__.__dict__[\"snapshot_name\"] = snapshot_name\n __props__.__dict__[\"snapshot_retention_limit\"] = snapshot_retention_limit\n __props__.__dict__[\"snapshot_window\"] = snapshot_window\n __props__.__dict__[\"sns_topic_arn\"] = sns_topic_arn\n __props__.__dict__[\"subnet_group_name\"] = subnet_group_name\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"tls_enabled\"] = tls_enabled\n return Cluster(resource_name, opts=opts, __props__=__props__)", "def get_state(self, run_id):\n raise NotImplementedError()", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n auto_devops_enabled: Optional[pulumi.Input[bool]] = None,\n avatar: Optional[pulumi.Input[str]] = None,\n avatar_hash: Optional[pulumi.Input[str]] = None,\n avatar_url: Optional[pulumi.Input[str]] = None,\n default_branch_protection: Optional[pulumi.Input[int]] = None,\n description: Optional[pulumi.Input[str]] = None,\n emails_disabled: Optional[pulumi.Input[bool]] = None,\n extra_shared_runners_minutes_limit: Optional[pulumi.Input[int]] = None,\n full_name: Optional[pulumi.Input[str]] = None,\n full_path: Optional[pulumi.Input[str]] = None,\n ip_restriction_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n lfs_enabled: Optional[pulumi.Input[bool]] = None,\n membership_lock: Optional[pulumi.Input[bool]] = None,\n mentions_disabled: Optional[pulumi.Input[bool]] = None,\n name: Optional[pulumi.Input[str]] = None,\n parent_id: Optional[pulumi.Input[int]] = None,\n path: Optional[pulumi.Input[str]] = None,\n prevent_forking_outside_group: Optional[pulumi.Input[bool]] = None,\n project_creation_level: Optional[pulumi.Input[str]] = None,\n request_access_enabled: Optional[pulumi.Input[bool]] = None,\n require_two_factor_authentication: Optional[pulumi.Input[bool]] = None,\n runners_token: Optional[pulumi.Input[str]] = None,\n share_with_group_lock: Optional[pulumi.Input[bool]] = None,\n shared_runners_minutes_limit: Optional[pulumi.Input[int]] = None,\n subgroup_creation_level: Optional[pulumi.Input[str]] = None,\n two_factor_grace_period: Optional[pulumi.Input[int]] = None,\n visibility_level: Optional[pulumi.Input[str]] = None,\n web_url: Optional[pulumi.Input[str]] = None) -> 'Group':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _GroupState.__new__(_GroupState)\n\n 
__props__.__dict__[\"auto_devops_enabled\"] = auto_devops_enabled\n __props__.__dict__[\"avatar\"] = avatar\n __props__.__dict__[\"avatar_hash\"] = avatar_hash\n __props__.__dict__[\"avatar_url\"] = avatar_url\n __props__.__dict__[\"default_branch_protection\"] = default_branch_protection\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"emails_disabled\"] = emails_disabled\n __props__.__dict__[\"extra_shared_runners_minutes_limit\"] = extra_shared_runners_minutes_limit\n __props__.__dict__[\"full_name\"] = full_name\n __props__.__dict__[\"full_path\"] = full_path\n __props__.__dict__[\"ip_restriction_ranges\"] = ip_restriction_ranges\n __props__.__dict__[\"lfs_enabled\"] = lfs_enabled\n __props__.__dict__[\"membership_lock\"] = membership_lock\n __props__.__dict__[\"mentions_disabled\"] = mentions_disabled\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"parent_id\"] = parent_id\n __props__.__dict__[\"path\"] = path\n __props__.__dict__[\"prevent_forking_outside_group\"] = prevent_forking_outside_group\n __props__.__dict__[\"project_creation_level\"] = project_creation_level\n __props__.__dict__[\"request_access_enabled\"] = request_access_enabled\n __props__.__dict__[\"require_two_factor_authentication\"] = require_two_factor_authentication\n __props__.__dict__[\"runners_token\"] = runners_token\n __props__.__dict__[\"share_with_group_lock\"] = share_with_group_lock\n __props__.__dict__[\"shared_runners_minutes_limit\"] = shared_runners_minutes_limit\n __props__.__dict__[\"subgroup_creation_level\"] = subgroup_creation_level\n __props__.__dict__[\"two_factor_grace_period\"] = two_factor_grace_period\n __props__.__dict__[\"visibility_level\"] = visibility_level\n __props__.__dict__[\"web_url\"] = web_url\n return Group(resource_name, opts=opts, __props__=__props__)", "def get_state(self) -> str:\n url = f\"{self.ha_url}/api/states/{self.entity_id}\"\n\n req = urllib.request.Request(url=url, headers=self.headers)\n with urllib.request.urlopen(req) as r:\n response = r.read().decode(\"utf\")\n return json.loads(response)[\"state\"]" ]
[ "0.6518575", "0.61676663", "0.615199", "0.6109608", "0.6109441", "0.60900664", "0.6072669", "0.5931966", "0.58831435", "0.5837981", "0.5818557", "0.58057237", "0.5786659", "0.5786354", "0.5778798", "0.5712893", "0.56756985", "0.5640747", "0.5597875", "0.5522777", "0.54472834", "0.5421105", "0.53973055", "0.53666955", "0.5356236", "0.53431284", "0.5337101", "0.5330727", "0.53002524", "0.5264817", "0.52409554", "0.5236409", "0.5203882", "0.51445156", "0.51287025", "0.51117516", "0.50366724", "0.50341666", "0.503394", "0.5024863", "0.49514914", "0.49472666", "0.4937974", "0.49374893", "0.49079597", "0.49067", "0.4905783", "0.49005863", "0.4884713", "0.48724687", "0.4866147", "0.48549452", "0.48507008", "0.48462453", "0.4840134", "0.48375142", "0.48252225", "0.481979", "0.48047793", "0.47957268", "0.47792864", "0.4776207", "0.4768087", "0.4765428", "0.47534257", "0.47461507", "0.47364134", "0.4726452", "0.4723947", "0.47216025", "0.47188118", "0.4707991", "0.470351", "0.46940824", "0.46828717", "0.46781892", "0.4670604", "0.46630615", "0.46581277", "0.46555063", "0.46514916", "0.4639116", "0.46378773", "0.46378592", "0.46348283", "0.4632734", "0.46325877", "0.46229342", "0.4622617", "0.4620302", "0.46144783", "0.4604933", "0.46033233", "0.46029952", "0.46015802", "0.45907685", "0.45905864", "0.45884925", "0.4588421", "0.4587705" ]
0.6545722
0